Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
# this module deliberately exposes only the two public entry points
__all__ = ['DAL', 'Field']

# default column lengths per field type (characters for strings, bytes for blobs)
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
# NOTE(review): bound on the number of stored timing entries — confirm consumer
TIMINGSSIZE = 100
# per-platform shared-library name of the SpatiaLite extension to load
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
# connection string used when the caller provides none
DEFAULT_URI = 'sqlite://dummy.db'
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
# major interpreter version (2 or 3); selects compatible stdlib spellings
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
    # Python 2: prefer the C-accelerated modules and keep py2 names
    import cPickle as pickle
    import cStringIO as StringIO
    import copy_reg as copyreg
    hashlib_md5 = hashlib.md5
    bytes, unicode = str, unicode
else:
    # Python 3: md5 requires bytes, so encode str input as utf8 first
    import pickle
    from io import StringIO as StringIO
    import copyreg
    long = int
    hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
    bytes, unicode = bytes, str

# every type accepted wherever the DAL allows "a callable"
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)
  194   
# keyword arguments recognized for table definition
# (presumably validated by define_table — confirm against caller)
TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name','fields',
     'common_filter','polymodel','table_class','on_define','actual_name'))

# keyword arguments recognized by select()
SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))

# module-level shortcuts; ogetattr/osetattr bypass any __getattr__/__setattr__
# overrides on Row/Table objects
ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
try:
    # inside web2py: reuse gluon's uuid helper
    from utils import web2py_uuid
except (ImportError, SystemError):
    # standalone DAL: fall back to the stdlib uuid module
    import uuid
    def web2py_uuid(): return str(uuid.uuid4())
217 218 try: 219 import portalocker 220 have_portalocker = True 221 except ImportError: 222 have_portalocker = False 223 224 try: 225 import serializers 226 have_serializers = True 227 except ImportError: 228 have_serializers = False 229 try: 230 import json as simplejson 231 except ImportError: 232 try: 233 import gluon.contrib.simplejson as simplejson 234 except ImportError: 235 simplejson = None 236 237 try: 238 import validators 239 have_validators = True 240 except (ImportError, SyntaxError): 241 have_validators = False 242 243 LOGGER = logging.getLogger("web2py.dal") 244 DEFAULT = lambda:0 245 246 GLOBAL_LOCKER = threading.RLock() 247 THREAD_LOCAL = threading.local() 248 249 # internal representation of tables with field 250 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 251 252 REGEX_TYPE = re.compile('^([\w\_\:]+)') 253 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 254 REGEX_W = re.compile('^\w+$') 255 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 256 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 257 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 258 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 259 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 260 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 261 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 262 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 263 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 264 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 265 REGEX_QUOTES = re.compile("'[^']*'") 266 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 267 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 268 REGEX_NOPASSWD = 
re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 269 270 # list of drivers will be built on the fly 271 # and lists only what is available 272 DRIVERS = [] 273 274 try: 275 from new import classobj 276 from google.appengine.ext import db as gae 277 from google.appengine.api import namespace_manager, rdbms 278 from google.appengine.api.datastore_types import Key ### for belongs on ID 279 from google.appengine.ext.db.polymodel import PolyModel 280 DRIVERS.append('google') 281 except ImportError: 282 pass 283 284 if not 'google' in DRIVERS: 285 286 try: 287 from pysqlite2 import dbapi2 as sqlite2 288 DRIVERS.append('SQLite(sqlite2)') 289 except ImportError: 290 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 291 292 try: 293 from sqlite3 import dbapi2 as sqlite3 294 DRIVERS.append('SQLite(sqlite3)') 295 except ImportError: 296 LOGGER.debug('no SQLite drivers sqlite3') 297 298 try: 299 # first try contrib driver, then from site-packages (if installed) 300 try: 301 import contrib.pymysql as pymysql 302 # monkeypatch pymysql because they havent fixed the bug: 303 # https://github.com/petehunt/PyMySQL/issues/86 304 pymysql.ESCAPE_REGEX = re.compile("'") 305 pymysql.ESCAPE_MAP = {"'": "''"} 306 # end monkeypatch 307 except ImportError: 308 import pymysql 309 DRIVERS.append('MySQL(pymysql)') 310 except ImportError: 311 LOGGER.debug('no MySQL driver pymysql') 312 313 try: 314 import MySQLdb 315 DRIVERS.append('MySQL(MySQLdb)') 316 except ImportError: 317 LOGGER.debug('no MySQL driver MySQLDB') 318 319 320 try: 321 import psycopg2 322 from psycopg2.extensions import adapt as psycopg2_adapt 323 DRIVERS.append('PostgreSQL(psycopg2)') 324 except ImportError: 325 LOGGER.debug('no PostgreSQL driver psycopg2') 326 327 try: 328 # first try contrib driver, then from site-packages (if installed) 329 try: 330 import contrib.pg8000.dbapi as pg8000 331 except ImportError: 332 import pg8000.dbapi as pg8000 333 DRIVERS.append('PostgreSQL(pg8000)') 334 
except ImportError: 335 LOGGER.debug('no PostgreSQL driver pg8000') 336 337 try: 338 import cx_Oracle 339 DRIVERS.append('Oracle(cx_Oracle)') 340 except ImportError: 341 LOGGER.debug('no Oracle driver cx_Oracle') 342 343 try: 344 try: 345 import pyodbc 346 except ImportError: 347 try: 348 import contrib.pypyodbc as pyodbc 349 except Exception, e: 350 raise ImportError(str(e)) 351 DRIVERS.append('MSSQL(pyodbc)') 352 DRIVERS.append('DB2(pyodbc)') 353 DRIVERS.append('Teradata(pyodbc)') 354 DRIVERS.append('Ingres(pyodbc)') 355 except ImportError: 356 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 357 358 try: 359 import Sybase 360 DRIVERS.append('Sybase(Sybase)') 361 except ImportError: 362 LOGGER.debug('no Sybase driver') 363 364 try: 365 import kinterbasdb 366 DRIVERS.append('Interbase(kinterbasdb)') 367 DRIVERS.append('Firebird(kinterbasdb)') 368 except ImportError: 369 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 370 371 try: 372 import fdb 373 DRIVERS.append('Firebird(fdb)') 374 except ImportError: 375 LOGGER.debug('no Firebird driver fdb') 376 ##### 377 try: 378 import firebirdsql 379 DRIVERS.append('Firebird(firebirdsql)') 380 except ImportError: 381 LOGGER.debug('no Firebird driver firebirdsql') 382 383 try: 384 import informixdb 385 DRIVERS.append('Informix(informixdb)') 386 LOGGER.warning('Informix support is experimental') 387 except ImportError: 388 LOGGER.debug('no Informix driver informixdb') 389 390 try: 391 import sapdb 392 DRIVERS.append('SQL(sapdb)') 393 LOGGER.warning('SAPDB support is experimental') 394 except ImportError: 395 LOGGER.debug('no SAP driver sapdb') 396 397 try: 398 import cubriddb 399 DRIVERS.append('Cubrid(cubriddb)') 400 LOGGER.warning('Cubrid support is experimental') 401 except ImportError: 402 LOGGER.debug('no Cubrid driver cubriddb') 403 404 try: 405 from com.ziclix.python.sql import zxJDBC 406 import java.sql 407 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 408 from org.sqlite import 
JDBC # required by java.sql; ensure we have it 409 zxJDBC_sqlite = java.sql.DriverManager 410 DRIVERS.append('PostgreSQL(zxJDBC)') 411 DRIVERS.append('SQLite(zxJDBC)') 412 LOGGER.warning('zxJDBC support is experimental') 413 is_jdbc = True 414 except ImportError: 415 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 416 is_jdbc = False 417 418 try: 419 import couchdb 420 DRIVERS.append('CouchDB(couchdb)') 421 except ImportError: 422 LOGGER.debug('no Couchdb driver couchdb') 423 424 try: 425 import pymongo 426 DRIVERS.append('MongoDB(pymongo)') 427 except: 428 LOGGER.debug('no MongoDB driver pymongo') 429 430 try: 431 import imaplib 432 DRIVERS.append('IMAP(imaplib)') 433 except: 434 LOGGER.debug('no IMAP driver imaplib') 435 436 PLURALIZE_RULES = [ 437 (re.compile('child$'), re.compile('child$'), 'children'), 438 (re.compile('oot$'), re.compile('oot$'), 'eet'), 439 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 440 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 441 (re.compile('sis$'), re.compile('sis$'), 'ses'), 442 (re.compile('man$'), re.compile('man$'), 'men'), 443 (re.compile('ife$'), re.compile('ife$'), 'ives'), 444 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 445 (re.compile('lf$'), re.compile('lf$'), 'lves'), 446 (re.compile('[sxz]$'), re.compile('$'), 'es'), 447 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 448 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 449 (re.compile('$'), re.compile('$'), 's'), 450 ]
def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the plural form of *singular*.

    Each rule is a ``(search_regex, sub_regex, replacement)`` triple; the
    first rule whose search regex matches produces the result.  The final
    catch-all rule in PLURALIZE_RULES appends ``'s'``.
    """
    for re_search, re_sub, replace in rules:
        if not re_search.search(singular):
            continue
        candidate = re_sub.sub(replace, singular)
        if candidate:
            return candidate
def hide_password(uri):
    """Return *uri* with the password portion masked as ``******``.

    Accepts a single uri string or a list/tuple of them (processed
    recursively, returning a list).
    """
    if not isinstance(uri, (list, tuple)):
        return REGEX_NOPASSWD.sub('******', uri)
    return [hide_password(item) for item in uri]
def OR(a, b):
    """Combine *a* and *b* with the ``|`` operator (query OR)."""
    return a | b
def AND(a, b):
    """Combine *a* and *b* with the ``&`` operator (query AND)."""
    return a & b
def IDENTITY(x):
    """Return the argument unchanged (used as the default credential decoder)."""
    return x
def varquote_aux(name, quotestr='%s'):
    """Wrap *name* in *quotestr* unless it is a plain ``\\w+`` identifier."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
def quote_keyword(a, keyword='timestamp'):
    """Double-quote a reserved word used as a field name in *a*.

    Rewrites occurrences of ``.<keyword>`` (e.g. ``mytable.timestamp``)
    as ``."<keyword>"`` so that backends treating *keyword* as reserved
    still accept the generated SQL.

    BUGFIX: the original compiled the literal pattern ``\\.keyword(?=\\w)``
    instead of interpolating the *keyword* parameter, so it never matched
    the intended identifier; the pattern now interpolates (and escapes)
    the keyword and matches it as a whole word.
    """
    regex = re.compile('\\.%s\\b' % re.escape(keyword))
    return regex.sub('."%s"' % keyword, a)
478 479 if 'google' in DRIVERS: 480 481 is_jdbc = False
482 483 - class GAEDecimalProperty(gae.Property):
484 """ 485 GAE decimal implementation 486 """ 487 data_type = decimal.Decimal 488
489 - def __init__(self, precision, scale, **kwargs):
490 super(GAEDecimalProperty, self).__init__(self, **kwargs) 491 d = '1.' 492 for x in range(scale): 493 d += '0' 494 self.round = decimal.Decimal(d)
495
496 - def get_value_for_datastore(self, model_instance):
497 value = super(GAEDecimalProperty, self)\ 498 .get_value_for_datastore(model_instance) 499 if value is None or value == '': 500 return None 501 else: 502 return str(value)
503
504 - def make_value_from_datastore(self, value):
505 if value is None or value == '': 506 return None 507 else: 508 return decimal.Decimal(value).quantize(self.round)
509
510 - def validate(self, value):
511 value = super(GAEDecimalProperty, self).validate(value) 512 if value is None or isinstance(value, decimal.Decimal): 513 return value 514 elif isinstance(value, basestring): 515 return decimal.Decimal(value) 516 raise gae.BadValueError("Property %s must be a Decimal or string."\ 517 % self.name)
518
519 ################################################################################### 520 # class that handles connection pooling (all adapters are derived from this one) 521 ################################################################################### 522 523 -class ConnectionPool(object):
524 525 POOLS = {} 526 check_active_connection = True 527 528 @staticmethod
529 - def set_folder(folder):
531 532 # ## this allows gluon to commit/rollback all dbs in this thread 533
    def close(self, action='commit', really=True):
        """Finalize and release this adapter's connection.

        *action* may be a method name ('commit'/'rollback'), a callable
        (invoked with the adapter), or a falsy value to skip that step.
        When pooling is enabled the connection is recycled into the pool
        (if it has room) instead of being physically closed.
        """
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                really = False  # recycled into the pool: do not close it
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        self.connection = None
    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        # close every adapter registered in this thread, then forget them all
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        # a callable *action* is invoked one final time with None
        # after all adapters have been closed
        if callable(action):
            action(None)
        return
565
566 - def find_or_make_work_folder(self):
567 """ this actually does not make the folder. it has to be there """ 568 self.folder = getattr(THREAD_LOCAL,'folder','') 569 570 if (os.path.isabs(self.folder) and 571 isinstance(self, UseDatabaseStoredFile) and 572 self.folder.startswith(os.getcwd())): 573 self.folder = os.path.relpath(self.folder, os.getcwd()) 574 575 # Creating the folder if it does not exist 576 if False and self.folder and not exists(self.folder): 577 os.mkdir(self.folder)
578
579 - def after_connection_hook(self):
580 """hook for the after_connection parameter""" 581 if callable(self._after_connection): 582 self._after_connection(self) 583 self.after_connection()
584
585 - def after_connection(self):
586 """ this it is supposed to be overloaded by adapters""" 587 pass
588
    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        # already connected: nothing to do
        if getattr(self,'connection', None) != None:
            return
        if f is None:
            f = self.connector

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            # pooling disabled: open a fresh connection every time
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                # lock is held only while touching the shared pool dict;
                # it is released before probing or creating a connection
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        # probe the recycled connection; a stale one raises
                        # and we loop to try the next pooled connection
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    # pool empty: make a brand new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
632
633 634 ################################################################################### 635 # this is a generic adapter that does nothing; all others are derived from this one 636 ################################################################################### 637 638 -class BaseAdapter(ConnectionPool):
639 native_json = False 640 driver = None 641 driver_name = None 642 drivers = () # list of drivers from which to pick 643 connection = None 644 commit_on_alter_table = False 645 support_distributed_transaction = False 646 uploads_in_blob = False 647 can_select_for_update = True 648 dbpath = None 649 folder = None 650 651 TRUE = 'T' 652 FALSE = 'F' 653 T_SEP = ' ' 654 QUOTE_TEMPLATE = '"%s"' 655 656 types = { 657 'boolean': 'CHAR(1)', 658 'string': 'CHAR(%(length)s)', 659 'text': 'TEXT', 660 'json': 'TEXT', 661 'password': 'CHAR(%(length)s)', 662 'blob': 'BLOB', 663 'upload': 'CHAR(%(length)s)', 664 'integer': 'INTEGER', 665 'bigint': 'INTEGER', 666 'float':'DOUBLE', 667 'double': 'DOUBLE', 668 'decimal': 'DOUBLE', 669 'date': 'DATE', 670 'time': 'TIME', 671 'datetime': 'TIMESTAMP', 672 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 673 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 674 'list:integer': 'TEXT', 675 'list:string': 'TEXT', 676 'list:reference': 'TEXT', 677 # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 678 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 679 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 680 } 681
682 - def isOperationalError(self,exception):
683 if not hasattr(self.driver, "OperationalError"): 684 return None 685 return isinstance(exception, self.driver.OperationalError)
686
687 - def id_query(self, table):
688 return table._id != None
689
690 - def adapt(self, obj):
691 return "'%s'" % obj.replace("'", "''")
692
693 - def smart_adapt(self, obj):
694 if isinstance(obj,(int,float)): 695 return str(obj) 696 return self.adapt(str(obj))
697
698 - def file_exists(self, filename):
699 """ 700 to be used ONLY for files that on GAE may not be on filesystem 701 """ 702 return exists(filename)
703
    def file_open(self, filename, mode='rb', lock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem

        Returns an open file object; when portalocker is available and
        *lock* is True the file is opened with an advisory lock.
        """
        if have_portalocker and lock:
            fileobj = portalocker.LockedFile(filename,mode)
        else:
            fileobj = open(filename,mode)
        return fileobj
713
714 - def file_close(self, fileobj):
715 """ 716 to be used ONLY for files that on GAE may not be on filesystem 717 """ 718 if fileobj: 719 fileobj.close()
720
721 - def file_delete(self, filename):
722 os.unlink(filename)
723
    def find_driver(self, adapter_args, uri=None):
        """Pick and bind the DBAPI driver module for this adapter.

        An explicit driver may be requested via the uri scheme
        ('postgres:psycopg2://...') or adapter_args['driver']; otherwise
        the first importable driver from self.drivers is used.
        Raises RuntimeError when nothing suitable was imported.
        """
        self.adapter_args = adapter_args
        # driver already bound (e.g. by a subclass): keep it
        if getattr(self,'driver',None) != None:
            return
        # drivers successfully imported at module load time end up in globals()
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            # 'engine:driver://...' -> ['engine', 'driver']
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
747
    def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log

        Appends *message* to the migration log file associated with
        *table* (absolute logfile path, or relative to self.folder).
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile','sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        # only log for tables that are under migration control (_dbt set)
        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)
769 770
    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Base adapter constructor: records connection parameters and
        installs inert placeholder connection/cursor objects.

        NOTE(review): driver_args/adapter_args use mutable defaults; they
        are not mutated here, but callers should not rely on sharing them.
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        # placeholder connection/cursor: any method call is swallowed and
        # returns [], so a driverless BaseAdapter can exist without a backend
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
788 - def sequence_name(self,tablename):
789 return '%s_sequence' % tablename
790
791 - def trigger_name(self,tablename):
792 return '%s_sequence' % tablename
793
794 - def varquote(self,name):
795 return name
796
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build (and optionally execute) the CREATE TABLE statement for *table*.

        Returns the generated SQL string.  When *migrate* is truthy the
        table's field metadata is pickled into a ``.table`` file; if that
        file already exists and differs, migrate_table() is invoked instead
        of creating the table.  *fake_migrate* records metadata without
        touching the database.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}  # table-level foreign keys: rtablename -> {rfieldname: field_name}
        tablename = table._tablename
        sortable = 0
        types = self.types
        # --- translate each DAL field into its backend column definition ---
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    # 'reference .' means self-reference
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table, '_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table, '_primarykey'):
                        rtablename, rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey) > 1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                        constraint_name=constraint_name,  # should be quoted
                                        foreign_key='%s (%s)' % (rtablename,
                                                                 rfieldname),
                                        table_name=tablename,
                                        field_name=field_name,
                                        on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else:  # make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name=field_name + '__idx',
                            field_name=field_name,
                            constraint_name=constraint_name,
                            foreign_key='%s (%s)' % (referenced,
                                                     id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int, field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision, scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self, 'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2  # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            # NULL/UNIQUE/custom qualifiers apply to plain columns only
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append table-level (multicolumn) foreign key constraints
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [rfields[k] for k in pkeys]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                    table_name=tablename,
                    field_name=', '.join(fkeys),
                    foreign_table=rtablename,
                    foreign_key=', '.join(pkeys),
                    # NOTE(review): uses the last *field* from the loop above
                    on_delete_action=field.ondelete)

        if getattr(table, '_primarykey', None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)), other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # determine where migration metadata (.table files) lives
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            # first time: actually create the table (unless faking)
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.create_sequence_and_triggers(query, table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            # metadata exists: compare and migrate if the schema changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, None,
                                   fake_migrate=fake_migrate)
        return query
1023
1024 - def migrate_table( 1025 self, 1026 table, 1027 sql_fields, 1028 sql_fields_old, 1029 sql_fields_aux, 1030 logfile, 1031 fake_migrate=False, 1032 ):
1033 1034 # logfile is deprecated (moved to adapter.log method) 1035 db = table._db 1036 db._migrated.append(table._tablename) 1037 tablename = table._tablename 1038 def fix(item): 1039 k,v=item 1040 if not isinstance(v,dict): 1041 v=dict(type='unknown',sql=v) 1042 return k.lower(),v
1043 # make sure all field names are lower case to avoid 1044 # migrations because of case cahnge 1045 sql_fields = dict(map(fix,sql_fields.iteritems())) 1046 sql_fields_old = dict(map(fix,sql_fields_old.iteritems())) 1047 sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems())) 1048 if db._debug: 1049 logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields)) 1050 1051 keys = sql_fields.keys() 1052 for key in sql_fields_old: 1053 if not key in keys: 1054 keys.append(key) 1055 new_add = self.concat_add(tablename) 1056 1057 metadata_change = False 1058 sql_fields_current = copy.copy(sql_fields_old) 1059 for key in keys: 1060 query = None 1061 if not key in sql_fields_old: 1062 sql_fields_current[key] = sql_fields[key] 1063 if self.dbengine in ('postgres',) and \ 1064 sql_fields[key]['type'].startswith('geometry'): 1065 # 'sql' == ftype in sql 1066 query = [ sql_fields[key]['sql'] ] 1067 else: 1068 query = ['ALTER TABLE %s ADD %s %s;' % \ 1069 (tablename, key, 1070 sql_fields_aux[key]['sql'].replace(', ', new_add))] 1071 metadata_change = True 1072 elif self.dbengine in ('sqlite', 'spatialite'): 1073 if key in sql_fields: 1074 sql_fields_current[key] = sql_fields[key] 1075 metadata_change = True 1076 elif not key in sql_fields: 1077 del sql_fields_current[key] 1078 ftype = sql_fields_old[key]['type'] 1079 if (self.dbengine in ('postgres',) and 1080 ftype.startswith('geometry')): 1081 geotype, parms = ftype[:-1].split('(') 1082 schema = parms.split(',')[0] 1083 query = [ "SELECT DropGeometryColumn ('%(schema)s', "+ 1084 "'%(table)s', '%(field)s');" % 1085 dict(schema=schema, table=tablename, field=key,) ] 1086 elif self.dbengine in ('firebird',): 1087 query = ['ALTER TABLE %s DROP %s;' % (tablename, key)] 1088 else: 1089 query = ['ALTER TABLE %s DROP COLUMN %s;' % 1090 (tablename, key)] 1091 metadata_change = True 1092 elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \ 1093 and not (key in table.fields and 1094 isinstance(table[key].type, 
SQLCustomType)) \ 1095 and not sql_fields[key]['type'].startswith('reference')\ 1096 and not sql_fields[key]['type'].startswith('double')\ 1097 and not sql_fields[key]['type'].startswith('id'): 1098 sql_fields_current[key] = sql_fields[key] 1099 t = tablename 1100 tt = sql_fields_aux[key]['sql'].replace(', ', new_add) 1101 if self.dbengine in ('firebird',): 1102 drop_expr = 'ALTER TABLE %s DROP %s;' 1103 else: 1104 drop_expr = 'ALTER TABLE %s DROP COLUMN %s;' 1105 key_tmp = key + '__tmp' 1106 query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt), 1107 'UPDATE %s SET %s=%s;' % (t, key_tmp, key), 1108 drop_expr % (t, key), 1109 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 1110 'UPDATE %s SET %s=%s;' % (t, key, key_tmp), 1111 drop_expr % (t, key_tmp)] 1112 metadata_change = True 1113 elif sql_fields[key]['type'] != sql_fields_old[key]['type']: 1114 sql_fields_current[key] = sql_fields[key] 1115 metadata_change = True 1116 1117 if query: 1118 self.log('timestamp: %s\n' 1119 % datetime.datetime.today().isoformat(), table) 1120 db['_lastsql'] = '\n'.join(query) 1121 for sub_query in query: 1122 self.log(sub_query + '\n', table) 1123 if fake_migrate: 1124 if db._adapter.commit_on_alter_table: 1125 self.save_dbt(table,sql_fields_current) 1126 self.log('faked!\n', table) 1127 else: 1128 self.execute(sub_query) 1129 # Caveat: mysql, oracle and firebird 1130 # do not allow multiple alter table 1131 # in one transaction so we must commit 1132 # partial transactions and 1133 # update table._dbt after alter table. 1134 if db._adapter.commit_on_alter_table: 1135 db.commit() 1136 self.save_dbt(table,sql_fields_current) 1137 self.log('success!\n', table) 1138 1139 elif metadata_change: 1140 self.save_dbt(table,sql_fields_current) 1141 1142 if metadata_change and not (query and db._adapter.commit_on_alter_table): 1143 db.commit() 1144 self.save_dbt(table,sql_fields_current) 1145 self.log('success!\n', table) 1146
1147 - def save_dbt(self,table, sql_fields_current):
1148 tfile = self.file_open(table._dbt, 'w') 1149 pickle.dump(sql_fields_current, tfile) 1150 self.file_close(tfile)
1151
1152 - def LOWER(self, first):
1153 return 'LOWER(%s)' % self.expand(first)
1154
1155 - def UPPER(self, first):
1156 return 'UPPER(%s)' % self.expand(first)
1157
1158 - def COUNT(self, first, distinct=None):
1159 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1160 % self.expand(first)
1161
1162 - def EXTRACT(self, first, what):
1163 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1164
    def EPOCH(self, first):
        # seconds since the Unix epoch, delegated to EXTRACT so backends
        # that spell it differently only override EXTRACT
        return self.EXTRACT(first, 'epoch')
1167
1168 - def LENGTH(self, first):
1169 return "LENGTH(%s)" % self.expand(first)
1170
1171 - def AGGREGATE(self, first, what):
1172 return "%s(%s)" % (what, self.expand(first))
1173
    def JOIN(self):
        # SQL keyword used for inner joins (overridable per backend)
        return 'JOIN'
1176
    def LEFT_JOIN(self):
        # SQL keyword used for left outer joins (overridable per backend)
        return 'LEFT JOIN'
1179
    def RANDOM(self):
        # random-order function; spelling is backend-specific, adapters
        # override (e.g. RAND() on mysql, RANDOM() on postgres)
        return 'Random()'
1182
1183 - def NOT_NULL(self, default, field_type):
1184 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1185
1186 - def COALESCE(self, first, second):
1187 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1188 return 'COALESCE(%s)' % ','.join(expressions)
1189
1190 - def COALESCE_ZERO(self, first):
1191 return 'COALESCE(%s,0)' % self.expand(first)
1192
    def RAW(self, first):
        # pass-through: first is already literal SQL text
        return first
1195
    def ALLOW_NULL(self):
        # suffix for nullable columns; '' because NULL is the SQL default
        # (adapters that need an explicit NULL keyword override this)
        return ''
1198
1199 - def SUBSTRING(self, field, parameters):
1200 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1201
1202 - def PRIMARY_KEY(self, key):
1203 return 'PRIMARY KEY(%s)' % key
1204
1205 - def _drop(self, table, mode):
1206 return ['DROP TABLE %s;' % table]
1207
1208 - def drop(self, table, mode=''):
1209 db = table._db 1210 queries = self._drop(table, mode) 1211 for query in queries: 1212 if table._dbt: 1213 self.log(query + '\n', table) 1214 self.execute(query) 1215 db.commit() 1216 del db[table._tablename] 1217 del db.tables[db.tables.index(table._tablename)] 1218 db._remove_references_to(table) 1219 if table._dbt: 1220 self.file_delete(table._dbt) 1221 self.log('success!\n', table)
1222
1223 - def _insert(self, table, fields):
1224 if fields: 1225 keys = ','.join(f.name for f, v in fields) 1226 values = ','.join(self.expand(v, f.type) for f, v in fields) 1227 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1228 else: 1229 return self._insert_empty(table)
1230
1231 - def _insert_empty(self, table):
1232 return 'INSERT INTO %s DEFAULT VALUES;' % table
1233
    def insert(self, table, fields):
        """
        Execute an INSERT and return the new record id wrapped in a lazy
        Reference (or a dict of key values for tables with a custom
        _primarykey).  On error, defer to the table's _on_insert_error
        callback when present.  Non-integer lastrowid values (e.g. from
        NoSQL adapters) are returned as-is.
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception:
            e = sys.exc_info()[1]
            if hasattr(table,'_on_insert_error'):
                return table._on_insert_error(table,fields,e)
            raise e
        if hasattr(table,'_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        # wrap in a Reference so the caller can both use it as an int
        # and lazily dereference the new row
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
1252
1253 - def bulk_insert(self, table, items):
1254 return [self.insert(table,item) for item in items]
1255
1256 - def NOT(self, first):
1257 return '(NOT %s)' % self.expand(first)
1258
1259 - def AND(self, first, second):
1260 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1261
1262 - def OR(self, first, second):
1263 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1264
1265 - def BELONGS(self, first, second):
1266 if isinstance(second, str): 1267 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1268 elif not second: 1269 return '(1=0)' 1270 items = ','.join(self.expand(item, first.type) for item in second) 1271 return '(%s IN (%s))' % (self.expand(first), items)
1272
1273 - def REGEXP(self, first, second):
1274 "regular expression operator" 1275 raise NotImplementedError
1276
1277 - def LIKE(self, first, second):
1278 "case sensitive like operator" 1279 raise NotImplementedError
1280
1281 - def ILIKE(self, first, second):
1282 "case in-sensitive like operator" 1283 return '(%s LIKE %s)' % (self.expand(first), 1284 self.expand(second, 'string'))
1285
1286 - def STARTSWITH(self, first, second):
1287 return '(%s LIKE %s)' % (self.expand(first), 1288 self.expand(second+'%', 'string'))
1289
1290 - def ENDSWITH(self, first, second):
1291 return '(%s LIKE %s)' % (self.expand(first), 1292 self.expand('%'+second, 'string'))
1293
    def CONTAINS(self, first, second, case_sensitive=False):
        """
        Substring / list-membership test rendered as a LIKE pattern.

        For text fields the needle becomes %needle% with '%' escaped to
        '%%'; for list: fields (stored '|'-delimited) it becomes
        %|needle|% with both '%' and '|' escaped.  Expression needles
        are escaped in SQL via REPLACE and wrapped via CONCAT.  Uses
        LIKE when case_sensitive, else ILIKE.
        """
        if first.type in ('string','text', 'json'):
            if isinstance(second,Expression):
                second = Expression(None,self.CONCAT('%',Expression(
                            None,self.REPLACE(second,('%','%%'))),'%'))
            else:
                second = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            if isinstance(second,Expression):
                second = Expression(None,self.CONCAT(
                        '%|',Expression(None,self.REPLACE(
                                Expression(None,self.REPLACE(
                                        second,('%','%%'))),('|','||'))),'|%'))
            else:
                second = '%|'+str(second).replace('%','%%')\
                    .replace('|','||')+'|%'
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,second)
1312
1313 - def EQ(self, first, second=None):
1314 if second is None: 1315 return '(%s IS NULL)' % self.expand(first) 1316 return '(%s = %s)' % (self.expand(first), 1317 self.expand(second, first.type))
1318
1319 - def NE(self, first, second=None):
1320 if second is None: 1321 return '(%s IS NOT NULL)' % self.expand(first) 1322 return '(%s <> %s)' % (self.expand(first), 1323 self.expand(second, first.type))
1324
1325 - def LT(self,first,second=None):
1326 if second is None: 1327 raise RuntimeError("Cannot compare %s < None" % first) 1328 return '(%s < %s)' % (self.expand(first), 1329 self.expand(second,first.type))
1330
1331 - def LE(self,first,second=None):
1332 if second is None: 1333 raise RuntimeError("Cannot compare %s <= None" % first) 1334 return '(%s <= %s)' % (self.expand(first), 1335 self.expand(second,first.type))
1336
1337 - def GT(self,first,second=None):
1338 if second is None: 1339 raise RuntimeError("Cannot compare %s > None" % first) 1340 return '(%s > %s)' % (self.expand(first), 1341 self.expand(second,first.type))
1342
1343 - def GE(self,first,second=None):
1344 if second is None: 1345 raise RuntimeError("Cannot compare %s >= None" % first) 1346 return '(%s >= %s)' % (self.expand(first), 1347 self.expand(second,first.type))
1348
1349 - def is_numerical_type(self, ftype):
1350 return ftype in ('integer','boolean','double','bigint') or \ 1351 ftype.startswith('decimal')
1352
1353 - def REPLACE(self, first, (second, third)):
1354 return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'), 1355 self.expand(second,'string'), 1356 self.expand(third,'string'))
1357
1358 - def CONCAT(self, *items):
1359 return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1360
1361 - def ADD(self, first, second):
1362 if self.is_numerical_type(first.type): 1363 return '(%s + %s)' % (self.expand(first), 1364 self.expand(second, first.type)) 1365 else: 1366 return self.CONCAT(first, second)
1367
1368 - def SUB(self, first, second):
1369 return '(%s - %s)' % (self.expand(first), 1370 self.expand(second, first.type))
1371
1372 - def MUL(self, first, second):
1373 return '(%s * %s)' % (self.expand(first), 1374 self.expand(second, first.type))
1375
1376 - def DIV(self, first, second):
1377 return '(%s / %s)' % (self.expand(first), 1378 self.expand(second, first.type))
1379
1380 - def MOD(self, first, second):
1381 return '(%s %% %s)' % (self.expand(first), 1382 self.expand(second, first.type))
1383
1384 - def AS(self, first, second):
1385 return '%s AS %s' % (self.expand(first), second)
1386
1387 - def ON(self, first, second):
1388 if use_common_filters(second): 1389 second = self.common_filter(second,[first._tablename]) 1390 return '%s ON %s' % (self.expand(first), self.expand(second))
1391
1392 - def INVERT(self, first):
1393 return '%s DESC' % self.expand(first)
1394
1395 - def COMMA(self, first, second):
1396 return '%s, %s' % (self.expand(first), self.expand(second))
1397
    def expand(self, expression, field_type=None):
        """
        Translate a DAL object into its SQL string representation.

        - Field: rendered as tablename.fieldname (CAST to the backend's
          text type when a string is required but the field is not a
          text-like type)
        - Expression/Query: dispatch to the operator stored in
          expression.op with first/second/optional_args (recursion
          happens inside the operator methods)
        - raw string op: parenthesized as-is, trailing ';' stripped
        - other values: rendered as SQL literals via self.represent
        """
        if isinstance(expression, Field):
            out = '%s.%s' % (expression.table._tablename, expression.name)
            if field_type == 'string' and not expression.type in (
                'string','text','json','password'):
                out = 'CAST(%s AS %s)' % (out, self.types['text'])
            return out
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            optional_args = expression.optional_args or {}
            if not second is None:
                out = op(first, second, **optional_args)
            elif not first is None:
                out = op(first,**optional_args)
            elif isinstance(op, str):
                if op.endswith(';'):
                    op=op[:-1]
                out = '(%s)' % op
            else:
                out = op()
            return out
        elif field_type:
            # plain value with a known field type -> SQL literal
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                            for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
1430
1431 - def table_alias(self,name):
1432 return str(name if isinstance(name,Table) else self.db[name])
1433
    def alias(self, table, alias):
        """
        Given a table object, makes a new table object
        with alias name.

        The copy is shallow: each field is re-copied individually and
        re-pointed at the aliased table; _ot keeps the original name so
        SQL can render 'original AS alias'.  The alias is registered on
        the owning DAL instance.
        """
        other = copy.copy(table)
        other['_ot'] = other._ot or other._tablename
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        table._db[alias] = other
        return other
1450
1451 - def _truncate(self, table, mode=''):
1452 tablename = table._tablename 1453 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1454
1455 - def truncate(self, table, mode= ' '):
1456 # Prepare functions "write_to_logfile" and "close_logfile" 1457 try: 1458 queries = table._db._adapter._truncate(table, mode) 1459 for query in queries: 1460 self.log(query + '\n', table) 1461 self.execute(query) 1462 table._db.commit() 1463 self.log('success!\n', table) 1464 finally: 1465 pass
1466
1467 - def _update(self, tablename, query, fields):
1468 if query: 1469 if use_common_filters(query): 1470 query = self.common_filter(query, [tablename]) 1471 sql_w = ' WHERE ' + self.expand(query) 1472 else: 1473 sql_w = '' 1474 sql_v = ','.join(['%s=%s' % (field.name, 1475 self.expand(value, field.type)) \ 1476 for (field, value) in fields]) 1477 tablename = "%s" % self.db[tablename] 1478 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1479
    def update(self, tablename, query, fields):
        """
        Execute the UPDATE built by _update and return the affected row
        count (None when the driver lacks rowcount).  Errors defer to
        the table's _on_update_error callback when defined.
        """
        sql = self._update(tablename, query, fields)
        try:
            self.execute(sql)
        except Exception:
            e = sys.exc_info()[1]
            table = self.db[tablename]
            if hasattr(table,'_on_update_error'):
                return table._on_update_error(table,query,fields,e)
            raise e
        try:
            return self.cursor.rowcount
        except:
            return None
1494
1495 - def _delete(self, tablename, query):
1496 if query: 1497 if use_common_filters(query): 1498 query = self.common_filter(query, [tablename]) 1499 sql_w = ' WHERE ' + self.expand(query) 1500 else: 1501 sql_w = '' 1502 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1503
    def delete(self, tablename, query):
        """
        Execute a DELETE for *query* and return the number of deleted
        rows (None when the driver lacks rowcount).  On sqlite and
        spatialite, ON DELETE CASCADE is emulated manually: the doomed
        ids are snapshotted before the delete so referencing rows can
        be removed afterwards.
        """
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite & SpatiaLite
        db = self.db
        table = db[tablename]
        if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
            # snapshot ids BEFORE deleting so cascading can find them
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite & SpatiaLite
        if self.dbengine in ('sqlite', 'spatialite') and counter:
            for field in table._referenced_by:
                if field.type=='reference '+table._tablename \
                        and field.ondelete=='CASCADE':
                    db(field.belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        return counter
1525
1526 - def get_table(self, query):
1527 tablenames = self.tables(query) 1528 if len(tablenames)==1: 1529 return tablenames[0] 1530 elif len(tablenames)<1: 1531 raise RuntimeError("No table selected") 1532 else: 1533 raise RuntimeError("Too many tables selected")
1534
    def expand_all(self, fields, tablenames):
        """
        Normalize a select field list: SQLALL entries expand to all of
        that table's fields, 'table.field' strings become Field objects,
        other strings are wrapped as opaque SQL expressions; an empty
        list selects every field of every table in *tablenames*.
        """
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    # raw SQL snippet: wrap so it survives expansion
                    # (item=item binds the current value, not the loop var)
                    append(Expression(db,lambda item=item:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
1556
    def _select(self, query, fields, attributes):
        """
        Build the complete SELECT statement for *query*/*fields*.

        Handles inner joins ('join'), left joins ('left'), distinct,
        groupby/having, orderby (including the '<random>' marker),
        limitby (adding a deterministic primary-key ORDER BY unless
        disabled via orderby_on_limitby) and for_update.
        """
        tables = self.tables
        # reject unknown select attributes early
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        tablenames_for_common_filters = tablenames
        # collect any extra tables referenced only by the field list
        for field in fields:
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        self._colnames = map(self.expand, fields)
        def geoexpand(field):
            # geometry columns are selected as WKT text
            if isinstance(field.type,str) and field.type.startswith('geometry'):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        orderby_on_limitby = args_get('orderby_on_limitby', True)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            # tables given directly vs. ON-expressions
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            # left-joined tables must not receive common filters
            tablenames_for_common_filters = [t for t in tablenames
                                             if not t in joinont ]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query,tablenames_for_common_filters)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        # assemble the FROM clause for the four join combinations
        if inner_join and not left:
            sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
        elif not inner_join and left:
            sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([self.table_alias(t) for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([self.table_alias(t) for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        else:
            sql_t = ', '.join(self.table_alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
            # paging without an ORDER BY is non-deterministic: order by
            # each table's primary key(s)
            sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
        # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
1691 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1692 if limitby: 1693 (lmin, lmax) = limitby 1694 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1695 return 'SELECT %s %s FROM %s%s%s;' % \ 1696 (sql_s, sql_f, sql_t, sql_w, sql_o)
1697
    def _fetchall(self):
        # fetch all remaining rows from the current cursor
        return self.cursor.fetchall()
1700
    def _select_aux(self,sql,fields,attributes):
        """
        Execute *sql* (or fetch raw rows from the 'cache' attribute),
        apply limitby slicing for backends that cannot LIMIT in SQL,
        and hand the rows to the row processor (self.parse by default).
        """
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            # keep cache keys bounded in length
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.

        When both 'cache' and 'cacheable' attributes are set, the fully
        parsed Rows object is cached; otherwise caching (if any) happens
        at the raw-rows level inside _select_aux.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            # keep cache keys bounded in length
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
1741
1742 - def _count(self, query, distinct=None):
1743 tablenames = self.tables(query) 1744 if query: 1745 if use_common_filters(query): 1746 query = self.common_filter(query, tablenames) 1747 sql_w = ' WHERE ' + self.expand(query) 1748 else: 1749 sql_w = '' 1750 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1751 if distinct: 1752 if isinstance(distinct,(list, tuple)): 1753 distinct = xorify(distinct) 1754 sql_d = self.expand(distinct) 1755 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1756 (sql_d, sql_t, sql_w) 1757 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1758
1759 - def count(self, query, distinct=None):
1760 self.execute(self._count(query, distinct)) 1761 return self.cursor.fetchone()[0]
1762
1763 - def tables(self, *queries):
1764 tables = set() 1765 for query in queries: 1766 if isinstance(query, Field): 1767 tables.add(query.tablename) 1768 elif isinstance(query, (Expression, Query)): 1769 if not query.first is None: 1770 tables = tables.union(self.tables(query.first)) 1771 if not query.second is None: 1772 tables = tables.union(self.tables(query.second)) 1773 return list(tables)
1774
1775 - def commit(self):
1776 if self.connection: return self.connection.commit()
1777
1778 - def rollback(self):
1779 if self.connection: return self.connection.rollback()
1780
1781 - def close_connection(self):
1782 if self.connection: return self.connection.close()
1783
    def distributed_transaction_begin(self, key):
        # no-op in the base adapter; backends supporting two-phase
        # commit override this
        return
1786
1787 - def prepare(self, key):
1788 if self.connection: self.connection.prepare()
1789
1790 - def commit_prepared(self, key):
1791 if self.connection: self.connection.commit()
1792
1793 - def rollback_prepared(self, key):
1794 if self.connection: self.connection.rollback()
1795
    def concat_add(self, tablename):
        # separator used when chaining multiple ADD clauses in a single
        # ALTER TABLE statement (overridden per backend)
        return ', ADD '
1798
1799 - def constraint_name(self, table, fieldname):
1800 return '%s_%s__constraint' % (table,fieldname)
1801
    def create_sequence_and_triggers(self, query, table, **args):
        # most backends auto-generate ids, so just run the CREATE;
        # sequence-based backends (oracle, firebird) override this
        self.execute(query)
1804
1805 - def log_execute(self, *a, **b):
1806 if not self.connection: return None 1807 command = a[0] 1808 if hasattr(self,'filter_sql_command'): 1809 command = self.filter_sql_command(command) 1810 if self.db._debug: 1811 LOGGER.debug('SQL: %s' % command) 1812 self.db._lastsql = command 1813 t0 = time.time() 1814 ret = self.cursor.execute(command, *a[1:], **b) 1815 self.db._timings.append((command,time.time()-t0)) 1816 del self.db._timings[:-TIMINGSSIZE] 1817 return ret
1818
    def execute(self, *a, **b):
        # thin wrapper so subclasses can override execution while keeping
        # logging/timing behavior centralized in log_execute
        return self.log_execute(*a, **b)
1821
    def represent(self, obj, fieldtype):
        """
        Convert a Python value into an SQL literal string for
        *fieldtype*: callables are invoked first, SQLCustomType uses its
        encoder, Expressions/Fields render as SQL, list: types are
        bar-encoded, None/'' become NULL (except for text-like types),
        and date/time/json/blob values are serialized then adapted
        (quoted/escaped) for the backend.
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            value = fieldtype.encoder(obj)
            if fieldtype.type in ('string','text', 'json'):
                return self.adapt(value)
            return value
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,[o for o in obj if o != ''])
        # we don't want to bar_encode json objects
        if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        # '' means NULL except for string/text/json/password/upload
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
            return 'NULL'
        r = self.represent_exceptions(obj, fieldtype)
        if not r is None:
            return r
        if fieldtype == 'boolean':
            # '0' and 'F...' spellings count as false
            if obj and not str(obj)[:1].upper() in '0F':
                return self.smart_adapt(self.TRUE)
            else:
                return self.smart_adapt(self.FALSE)
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(long(obj))
        if field_is_type('decimal'):
            return str(obj)
        elif field_is_type('reference'): # reference
            if fieldtype.find('.')>0:
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(long(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat(self.T_SEP)[:19]
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                # NOTE(review): [:10] keeps a partial microsecond digit
                # for times with microseconds ('HH:MM:SS.f'); [:8] looks
                # intended -- confirm before changing
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'json':
            if not self.native_json:
                if have_serializers:
                    obj = serializers.json(obj)
                elif simplejson:
                    obj = simplejson.dumps(obj)
                else:
                    raise RuntimeError("missing simplejson")
        if not isinstance(obj,bytes):
            obj = bytes(obj)
        try:
            # ensure the byte string is valid in the db codec
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return self.adapt(obj)
1905
1906 - def represent_exceptions(self, obj, fieldtype):
1907 return None
1908
1909 - def lastrowid(self, table):
1910 return None
1911
1912 - def rowslice(self, rows, minimum=0, maximum=None):
1913 """ 1914 By default this function does nothing; 1915 overload when db does not do slicing. 1916 """ 1917 return rows
1918
    def parse_value(self, value, field_type, blob_decode=True):
        """
        Convert a raw driver value into the DAL's Python representation
        for *field_type*, dispatching via self.parsemap on the base type
        name.  Text-like and geometry values pass through; blobs pass
        through when blob_decode is False.
        """
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # strip length/precision decorations: 'decimal(10,2)' -> 'decimal'
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
1940
1941 - def parse_reference(self, value, field_type):
1942 referee = field_type[10:].strip() 1943 if not '.' in referee: 1944 value = Reference(value) 1945 value._table, value._record = self.db[referee], None 1946 return value
1947
1948 - def parse_boolean(self, value, field_type):
1949 return value == self.TRUE or str(value)[:1].lower() == 't'
1950
1951 - def parse_date(self, value, field_type):
1952 if isinstance(value, datetime.datetime): 1953 return value.date() 1954 if not isinstance(value, (datetime.date,datetime.datetime)): 1955 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1956 value = datetime.date(y, m, d) 1957 return value
1958
1959 - def parse_time(self, value, field_type):
1960 if not isinstance(value, datetime.time): 1961 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1962 if len(time_items) == 3: 1963 (h, mi, s) = time_items 1964 else: 1965 (h, mi, s) = time_items + [0] 1966 value = datetime.time(h, mi, s) 1967 return value
1968
    def parse_datetime(self, value, field_type):
        """Coerce a stored value to a naive `datetime.datetime`.

        Accepts datetime instances or strings shaped like
        'YYYY-MM-DD[xHH:MM:SS[(+|-)HH:MM]]' (any single separator char at
        position 10). A trailing '+HH:MM' offset is added to the parsed
        time and '-HH:MM' subtracted. NOTE(review): adding the '+' offset
        shifts the value further from UTC rather than normalising to it --
        confirm this is the intended behavior.
        """
        if not isinstance(value, datetime.datetime):
            value = str(value)
            # fixed-width slices: date, time, and whatever trails (tz/frac)
            date_part,time_part,timezone = value[:10],value[11:19],value[19:]
            if '+' in timezone:
                ms,tz = timezone.split('+')
                h,m = tz.split(':')
                dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
            elif '-' in timezone:
                ms,tz = timezone.split('-')
                h,m = tz.split(':')
                dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
            else:
                dt = None
            (y, m, d) = map(int,date_part.split('-'))
            # pad missing minute/second components with zeros
            time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
            while len(time_parts)<3: time_parts.append(0)
            time_items = map(int,time_parts)
            (h, mi, s) = time_items
            value = datetime.datetime(y, m, d, h, mi, s)
            if dt:
                value = value + dt
        return value
1992
1993 - def parse_blob(self, value, field_type):
1994 return base64.b64decode(str(value))
1995
1996 - def parse_decimal(self, value, field_type):
1997 decimals = int(field_type[8:-1].split(',')[-1]) 1998 if self.dbengine in ('sqlite', 'spatialite'): 1999 value = ('%.' + str(decimals) + 'f') % value 2000 if not isinstance(value, decimal.Decimal): 2001 value = decimal.Decimal(str(value)) 2002 return value
2003
2004 - def parse_list_integers(self, value, field_type):
2005 if not isinstance(self, NoSQLAdapter): 2006 value = bar_decode_integer(value) 2007 return value
2008
2009 - def parse_list_references(self, value, field_type):
2010 if not isinstance(self, NoSQLAdapter): 2011 value = bar_decode_integer(value) 2012 return [self.parse_reference(r, field_type[5:]) for r in value]
2013
2014 - def parse_list_strings(self, value, field_type):
2015 if not isinstance(self, NoSQLAdapter): 2016 value = bar_decode_string(value) 2017 return value
2018
    def parse_id(self, value, field_type):
        """Coerce an id column to a Python 2 `long`."""
        return long(value)
2021
    def parse_integer(self, value, field_type):
        """Coerce an integer/bigint column to a Python 2 `long`."""
        return long(value)
2024
2025 - def parse_double(self, value, field_type):
2026 return float(value)
2027
    def parse_json(self, value, field_type):
        """Decode a JSON column.

        When the backend supports JSON natively the driver already returns
        the decoded object; otherwise the stored string is parsed with the
        first available JSON library (web2py serializers, then simplejson).

        Raises:
            RuntimeError: if the stored value is not a string, or no JSON
                library is available.
        """
        if not self.native_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            if have_serializers:
                value = serializers.loads_json(value)
            elif simplejson:
                value = simplejson.loads(value)
            else:
                raise RuntimeError("missing simplejson")
        return value
2041
    def build_parsemap(self):
        """Map the leading word of each field-type string to its parse_*
        method; `parse_value` dispatches through this table."""
        self.parsemap = {
            'id':self.parse_id,
            'integer':self.parse_integer,
            'bigint':self.parse_integer,
            'float':self.parse_double,
            'double':self.parse_double,
            'reference':self.parse_reference,
            'boolean':self.parse_boolean,
            'date':self.parse_date,
            'time':self.parse_time,
            'datetime':self.parse_datetime,
            'blob':self.parse_blob,
            'decimal':self.parse_decimal,
            'json':self.parse_json,
            'list:integer':self.parse_list_integers,
            'list:reference':self.parse_list_references,
            'list:string':self.parse_list_strings,
            }
2061
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable = False):
        """Convert raw driver `rows` into a `Rows` object.

        For each 'table.field' column the value is parsed per the field
        type and grouped into one Row per table; id columns additionally
        get update_record/delete_record helpers and lazy back-reference
        sets (unless `cacheable`). Columns that do not match
        'table.field' (expressions, aliases) are collected under '_extra'.
        Virtual/lazy fields are evaluated on the finished Rows object.
        """
        db = self.db
        virtualtables = []
        new_rows = []
        # precompute per-column metadata once instead of per row
        tmps = []
        for colname in colnames:
            if not REGEX_TABLE_DOT_FIELD.match(colname):
                tmps.append(None)
            else:
                (tablename, fieldname) = colname.split('.')
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename,fieldname,table,field,ft))
        for (i,row) in enumerate(rows):
            new_row = Row()
            for (j,colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename,fieldname,table,field,ft) = tmp
                    if tablename in new_row:
                        colset = new_row[tablename]
                    else:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value,ft,blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft=='id' and fieldname!='id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        colset.update_record = RecordUpdater(colset,table,id)
                        colset.delete_record = RecordDeleter(table,id)
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                table=rfield.tablename,field=rfield.name)
                            if referee_link and not referee_link in colset:
                                # lazy set of rows referencing this record
                                colset[referee_link] = LazySet(rfield,id)
                else:
                    # expression/alias column: stash under '_extra' and,
                    # for 'expr AS name', also as a row attribute
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type,blob_decode)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)


        for tablename in virtualtables:
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.iteritems()
                              if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.iteritems()
                           if isinstance(v,FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        box[f] = v.f(row)
                    for f,v in fields_lazy:
                        box[f] = (v.handler or VirtualCommand)(v.f,row)

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2155
2156 - def common_filter(self, query, tablenames):
2157 tenant_fieldname = self.db._request_tenant 2158 2159 for tablename in tablenames: 2160 table = self.db[tablename] 2161 2162 # deal with user provided filters 2163 if table._common_filter != None: 2164 query = query & table._common_filter(query) 2165 2166 # deal with multi_tenant filters 2167 if tenant_fieldname in table: 2168 default = table[tenant_fieldname].default 2169 if not default is None: 2170 newquery = table[tenant_fieldname] == default 2171 if query is None: 2172 query = newquery 2173 else: 2174 query = query & newquery 2175 return query
2176
2177 - def CASE(self,query,t,f):
2178 def represent(x): 2179 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2180 if x is None: return 'NULL' 2181 elif isinstance(x,Expression): return str(x) 2182 else: return self.represent(x,types.get(type(x),'string'))
2183 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2184 (self.expand(query),represent(t),represent(f))) 2185
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """DAL adapter for SQLite (``sqlite://`` URIs)."""

    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # delegate to the registered web2py_extract UDF since SQLite has
        # no native EXTRACT()
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """SQLite UDF: extract a date/time component (or the epoch) from an
        ISO 'YYYY-MM-DD HH:MM:SS' string by fixed-width slicing."""
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            # NOTE(review): bare except silently maps malformed input to
            # NULL; this mirrors SQL semantics but can hide bugs
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        """SQLite UDF backing the REGEXP operator."""
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # NOTE(review): driver_args/adapter_args are mutable defaults
        # shared across calls; they are mutated below -- confirm callers
        # always pass fresh dicts
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # pooling is disabled for sqlite regardless of pool_size
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                # relative path: resolve against the working folder
                if PYTHON_VERSION == 2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the UDFs used by EXTRACT() and REGEXP()
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # sqlite has no TRUNCATE; delete rows and reset the autoincrement
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
class SpatiaLiteAdapter(SQLiteAdapter):
    """DAL adapter for SQLite with the SpatiaLite GIS extension
    (``spatialite://`` URIs)."""

    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        # NOTE(review): driver_args/adapter_args are mutable defaults
        # shared across calls and mutated below
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        # pooling is disabled regardless of pool_size (as in SQLiteAdapter)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # default spatial reference id used for geometry columns
        self.srid = srid
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # load the platform-specific spatialite shared library, then
        # register the same UDFs as the plain SQLite adapter
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
                                       second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry values as ST_GeomFromText literals; everything
        else falls through to the base adapter."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            # e.g. 'geometry(public,4326)' -> parms ['public','4326']
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            # if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            # elif field_is_type('geography'):
            #     value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2389
class JDBCSQLiteAdapter(SQLiteAdapter):
    """DAL adapter for SQLite accessed over zxJDBC (Jython)."""

    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        def connector(dbpath=self.dbpath,driver_args=driver_args):
            # JDBC URL form: jdbc:sqlite:<path>
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        # NOTE(review): unlike SQLiteAdapter, no REGEXP UDF is registered
        # here, so REGEXP queries will fail on this adapter -- confirm

    def execute(self, a):
        return self.log_execute(a)
2429
class MySQLAdapter(BaseAdapter):
    """DAL adapter for MySQL (``mysql://`` URIs) via MySQLdb or pymysql."""

    drivers = ('MySQLdb','pymysql')

    commit_on_alter_table = True
    support_distributed_transaction = True
    # web2py field type -> MySQL column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = "`%s`"

    def varquote(self,name):
        # quote identifiers with backticks only when necessary
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    # two-phase (XA) distributed transaction support
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,ley):
        # NOTE(review): parameter name 'ley' is presumably a typo for
        # 'key' (cf. prepare/rollback_prepared); left unchanged because
        # renaming it would alter the keyword interface
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    # mysql://user:password@host:port/db?set_encoding=charset
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        # NOTE(review): credential_decoder is applied a second time to the
        # already-decoded user/password here -- harmless for the default
        # IDENTITY decoder, but verify for custom decoders
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)


        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2557
class PostgreSQLAdapter(BaseAdapter):
    """DAL adapter for PostgreSQL (``postgres://`` URIs) via psycopg2 or
    pg8000, with PostGIS helpers."""

    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    # web2py field type -> PostgreSQL column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',

        }

    QUOTE_TEMPLATE = '%s'

    def varquote(self,name):
        # quote identifiers with double quotes only when necessary
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        """Quote a Python value as a SQL string literal, using the
        driver's own quoting when available (psycopg2)."""
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            # '%' doubled because pg8000 uses format-style paramstyle
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' means string concatenation (||) for text-like types
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    # two-phase commit support (PREPARE TRANSACTION)
    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    # postgres://user:password@host:port/db?sslmode=...
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        # default spatial reference id for PostGIS columns
        self.srid = srid
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # build a libpq-style connection string
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose diver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        """Enable the native JSON column type when the server/driver
        combination supports it (PostgreSQL >= 9.2)."""
        # check JSON data type support
        # (to be added to after_connection)
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json:
            self.types["json"] = "JSON"
            self.native_json = True
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # non-text operands must be cast before LIKE
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        # NOTE(review): the CAST branch emits LIKE (case-sensitive), not
        # ILIKE -- confirm whether that is intentional for non-text types
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry/geography values as PostGIS literals; everything
        else falls through to the base adapter."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            # e.g. 'geometry(public,4326)' -> parms ['public','4326']
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2833
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter variant that stores ``list:*`` fields as native
    PostgreSQL arrays instead of bar-encoded TEXT."""

    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        # the driver already returns a native Python list for BIGINT[]
        return value

    def parse_list_references(self, value, field_type):
        # native list of ids: just wrap each element as a Reference
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        # the driver already returns a native Python list for TEXT[]
        return value

    def represent(self, obj, fieldtype):
        """Render ``list:*`` values as ARRAY[...] literals; everything else
        falls through to the base adapter."""
        field_is_type = fieldtype.startswith
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,obj)
            return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
        return BaseAdapter.represent(self, obj, fieldtype)
2886
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL over JDBC (Jython/zxJDBC); reuses the PostgreSQL dialect."""
    drivers = ('zxJDBC',)

    # user[:password]@host[:port]/dbname
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        """Parse the DAL uri and prepare (optionally open) a JDBC connection."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]  # strip the 'scheme://' prefix
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # positional args for zxJDBC.connect: (jdbc-url, user, password)
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Force a unicode client encoding and open a transaction on connect."""
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
2933
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle.

    Oracle has no autoincrement columns, so each table gets a companion
    SEQUENCE plus a BEFORE INSERT trigger (see create_sequence_and_triggers)
    that fills in the id.  CLOB values are passed as bind variables because
    they cannot be inlined in SQL text (see represent_exceptions/execute).
    """
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        """Name of the per-table sequence used to generate ids."""
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        """Name of the per-table BEFORE INSERT trigger."""
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        # drop the table together with its companion sequence
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Oracle (pre-12c) has no LIMIT/OFFSET: emulate it by wrapping the
        # query and filtering on ROWNUM (upper bound) / w_row (lower bound).
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific SQL literals; return None to use the default."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # :CLOB('...') is a placeholder rewritten into a numbered bind
            # variable by execute() below
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Prepare (optionally open) a cx_Oracle connection from the DAL uri."""
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]  # everything after 'oracle://'
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # pin the session date formats so string round-tripping is stable
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # matches the first :CLOB('...') placeholder outside quoted strings
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """
        Rewrite every :CLOB('...') placeholder produced by
        represent_exceptions into a numbered bind variable (:1, :2, ...),
        appending the CLOB payloads to *args*, then run the statement.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # strip the CLOB(' prefix and ') suffix, un-escape quotes
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table, its id sequence and the BEFORE INSERT trigger.

        The trigger also resynchronises the sequence when a row is inserted
        with an explicit id (keeps the sequence ahead of manual inserts).
        """
        tablename = table._tablename
        id_name = table._id.name
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
            CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
            DECLARE
                curr_val NUMBER;
                diff_val NUMBER;
                PRAGMA autonomous_transaction;
            BEGIN
                IF :NEW.%(id)s IS NOT NULL THEN
                    EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                    diff_val := :NEW.%(id)s - curr_val - 1;
                    IF diff_val != 0 THEN
                      EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
                      EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                      EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
                    END IF;
                END IF;
                SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
            END;
        """ % dict(trigger_name=trigger_name, tablename=tablename,
                   sequence_name=sequence_name,id=id_name))

    def lastrowid(self,table):
        """Return the id of the last inserted row (sequence current value)."""
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return long(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        # read CLOB columns eagerly: a LOB handle is no longer valid after
        # the next fetch, so materialize each row's CLOBs up front
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                           for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3109
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc.

    Supports both DSN-style uris ('mssql://mydsn') and explicit
    user:password@host:port/db uris with optional '?key=value&...' ODBC
    arguments.  Includes MSSQL spatial (geometry/geography) support.
    """
    drivers = ('pyodbc',)
    T_SEP = 'T'

    QUOTE_TEMPLATE = "[%s]"

    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self,tablename):
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self,name):
        return varquote_aux(name,'[%s]')

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # MSSQL spells LENGTH as LEN
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # only the upper bound can be expressed in SQL (TOP); the offset is
        # applied in Python by rowslice() below
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Build the ODBC connection string and (optionally) connect."""
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # DSN-style uri: the remainder is used verbatim as the ODBC
            # connection string
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        """Return the identity value generated by the last insert."""
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # apply the limitby offset in Python (see select_limitby)
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def CONCAT(self, *items):
        return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """
        Render *obj* as a SQL literal; spatial types are wrapped in
        geometry::/geography::STGeomFromText with the srid taken from the
        field-type parameter list (defaults: 0 for geometry, 4326 for
        geography).

        Fixes over the previous version: a bare 'geometry'/'geography'
        field type (no '(...)' parameters, as declared in self.types)
        raised ValueError while unpacking the split; the geography branch
        matched only the exact string 'geography' so parametrized
        geography types were never handled; and an unreachable duplicate
        return has been removed.
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0 # MS SQL default srid for geometry
            if '(' in fieldtype:
                geotype, parms = fieldtype[:-1].split('(')
                if parms:
                    # NOTE(review): srid is set to the whole parameter string,
                    # mirroring the original behavior — verify callers only
                    # pass a single srid parameter here
                    srid = parms
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif field_is_type('geography'):
            srid = 4326 # MS SQL default srid for geography
            if '(' in fieldtype:
                geotype, parms = fieldtype[:-1].split('(')
                if parms:
                    srid = parms
            return "geography::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
3316
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # zero offset: plain TOP; non-zero offset: emulate OFFSET with a
        # ROW_NUMBER() OVER (ORDER BY ...) wrapper (requires MSSQL >= 2005)
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # ROW_NUMBER starts at 1 and BETWEEN is inclusive
            # split sql_o into the part before ORDER BY (e.g. GROUP BY)
            # and the ORDER BY expression itself
            sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # alias every selected column as f_0, f_1, ... so the outer
            # query can reference them regardless of the inner expressions
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
    def rowslice(self,rows,minimum=0,maximum=None):
        # limitby is fully applied in SQL above, so never slice in Python
        return rows
3338
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter using national (Unicode) character types (NVARCHAR/NTEXT)."""
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Prefix quoted text literals with N so MSSQL treats them as Unicode."""
        value = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string', 'text', 'json') and value[:1] == "'":
            return 'N' + value
        return value

    def execute(self, a):
        """Decode the statement from utf8 before logging and running it."""
        command = a.decode('utf8')
        return self.log_execute(command)
3378
class VerticaAdapter(MSSQLAdapter):
    """Adapter for HP Vertica (via pyodbc); overrides the parts of the
    MSSQL dialect that Vertica spells differently."""
    drivers = ('pyodbc',)
    T_SEP = ' '

    types = {
        'boolean': 'BOOLEAN',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BYTEA',
        'json': 'VARCHAR(%(length)s)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'IDENTITY',
        'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BYTEA',
        'list:string': 'BYTEA',
        'list:reference': 'BYTEA',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def EXTRACT(self, first, what):
        """Vertica uses DATE_PART rather than MSSQL's DATEPART."""
        return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))

    def _truncate(self, table, mode=''):
        """Vertica TRUNCATE; no sequence reset is needed."""
        return ['TRUNCATE %s %s;' % (table._tablename, mode or '')]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Paginate with LIMIT/OFFSET instead of TOP."""
        if limitby:
            lmin, lmax = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def lastrowid(self, table):
        """Return the identity generated by the last insert."""
        self.execute('SELECT LAST_INSERT_ID();')
        return long(self.cursor.fetchone()[0])

    def execute(self, a):
        # no utf8 re-decoding here (unlike MSSQL2Adapter)
        return self.log_execute(a)
3428
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase via the `Sybase` driver.

    Inherits the MSSQL SQL dialect (and its REGEX_DSN/REGEX_URI patterns);
    only the connection setup and the type map differ.
    """
    drivers = ('Sybase',)

    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the DAL uri and prepare (optionally open) the connection."""
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # DSN-style uri: sybase://mydsn
            # NOTE(review): this branch validates the DSN but never defines a
            # `connector`, so `self.connector = connector` below would raise
            # NameError — DSN connections appear never to have worked here;
            # left as-is pending a decision on the intended behavior.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # FIX: match against `ruri` (scheme already stripped), not the
            # full `uri` — matching `uri` made the regex capture 'sybase'
            # as the user and garbage as the password.
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            driver_args.update(user = credential_decoder(user),
                               password = credential_decoder(password))

            def connector(dsn=dsn,driver_args=driver_args):
                return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3519
class FireBirdAdapter(BaseAdapter):
    """Adapter for Firebird/InterBase.

    Firebird has no autoincrement columns: each table gets a GENERATOR plus
    a BEFORE INSERT trigger that fills in the id (see
    create_sequence_and_triggers).
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    commit_on_alter_table = False
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        """Name of the per-table generator used to produce ids."""
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        """Name of the per-table BEFORE INSERT trigger."""
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def LENGTH(self, first):
        return "CHAR_LENGTH(%s)" % self.expand(first)

    def CONTAINS(self,first,second,case_sensitive=False):
        # list: fields are stored as '|item|item|' strings, so wrap the
        # needle in bars (doubling any embedded bar) before searching;
        # Firebird's CONTAINING operator is case-insensitive
        if first.type.startswith('list:'):
            second = Expression(None,self.CONCAT('|',Expression(
                        None,self.REPLACE(second,('|','||'))),'|'))
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def _drop(self,table,mode):
        # drop the table together with its id generator
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird pagination: SELECT FIRST <count> SKIP <offset> ...
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        # Firebird has no TRUNCATE: delete all rows and reset the generator
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # user[:password]@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the DAL uri and prepare (optionally open) the connection."""
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]  # strip the 'scheme://' prefix
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                           user = credential_decoder(user),
                           password = credential_decoder(password),
                           charset = charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table, its id generator and the BEFORE INSERT trigger."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        """Return the last generated id: gen_id(gen, 0) reads the current value."""
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return long(self.cursor.fetchone()[0])
3648
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird embedded: the uri carries a database file path, not host/db."""
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # user[:password]@/path/to/database[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the embedded-style uri and prepare (optionally open) the connection."""
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]  # strip the 'scheme://' prefix
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''  # empty host selects the embedded engine
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3695
class InformixAdapter(BaseAdapter):
    """DAL adapter for IBM Informix (9.0+), using the informixdb driver.

    URIs look like ``informix://user:password@host/database``; the driver
    receives a DSN of the form ``database@host``.
    """
    drivers = ('informixdb',)

    # Field-type -> Informix DDL fragment.
    # NOTE(review): several entries ('BLOB SUB_TYPE 1', 'BIGSERIAL',
    # CHAR(8) for time) mirror the Firebird adapter above -- confirm
    # against actual Informix DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        # SQL expression used to order rows randomly.
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        # Informix wants DEFAULT before NOT NULL.
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Compose a SELECT, emulating limit/offset with SKIP/FIRST
        (availability depends on the server version)."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Render date/datetime literals via to_date(); returning None
        tells the caller to fall back to the default representation."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        # BUGFIX: decode the credentials exactly once.  The original code
        # ran user/password through credential_decoder a second time below,
        # which corrupts them for any non-identity decoder.
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password,autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # the informixdb driver rejects a trailing statement terminator
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the SERIAL value of the last insert (DB-API ext.)
        return self.cursor.sqlerrd[1]
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine (work in progress).

    SE lacks SKIP/FIRST, so pagination is done client-side by rowslice().
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby is intentionally ignored here; see rowslice()
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self,rows,minimum=0,maximum=None):
        # slicing with an upper bound of None is the same as rows[minimum:]
        return rows[minimum:maximum]
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2 over ODBC (pyodbc).

    The text after 'db2://' in the URI is handed to pyodbc verbatim as an
    ODBC connection string.
    """
    drivers = ('pyodbc',)

    # field type -> DB2 DDL fragment
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # DB2 only caps the row count here; the lower bound is applied
        # client-side by rowslice().
        if limitby:
            sql_o += ' FETCH FIRST %i ROWS ONLY' % limitby[1]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal rendering for types DB2 treats specially; None defers
        to the default representation."""
        if fieldtype == 'blob':
            return "BLOB('%s')" % base64.b64encode(str(obj))
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                # DB2 timestamp literal format: yyyy-mm-dd-hh.mm.ss
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        cnxn_string = uri.split('://', 1)[1]
        def connector(cnxn=cnxn_string,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # the ODBC driver rejects a trailing ';'
        if command.endswith(';'):
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # rows[minimum:None] is equivalent to rows[minimum:]
        return rows[minimum:maximum]
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata over ODBC (pyodbc).

    The text after 'teradata://' in the URI is passed verbatim to pyodbc
    as an ODBC connection string.
    """
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # everything after the scheme is the raw ODBC connection string
        cnxn_string = uri.split('://', 1)[1]
        def connector(cnxn=cnxn_string,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby):
    # only the upper bound goes into SQL (TOP); the lower bound is
    # handled client-side.
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            sql_s += ' TOP %i' % limitby[1]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # Teradata has no TRUNCATE; DELETE ... ALL is the fast-path delete.
        return ['DELETE FROM %s ALL;' % (table._tablename)]
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres over ODBC (pyodbc).

    Auto-increment ids are emulated with a per-table sequence; the
    INGRES_SEQNAME placeholder in the type strings is substituted in
    create_sequence_and_triggers().
    """
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Pagination via FIRST n (row cap) and OFFSET m (Ingres 9.2+)."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "ingres"
        # BUGFIX: removed the stray `self._driver = pyodbc` assignment.
        # It referenced the pyodbc module directly (NameError when pyodbc
        # is not importable); driver resolution goes through find_driver()
        # below, exactly as in every other adapter.
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            # NOTE(review): this reads table.primarykey while the guard
            # checks table._primarykey -- confirm both attributes exist.
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter variant that uses Unicode column types
    (NVARCHAR/NCLOB) instead of the byte-oriented ones; all behavior is
    inherited from IngresAdapter."""

    drivers = ('pyodbc',)

    # Same mapping as IngresAdapter.types, with N* character types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB using the sapdb driver."""
    drivers = ('sapdb',)

    support_distributed_transaction = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        # name of the sequence backing a table's id column
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate limit/offset with a nested ROWNO subquery."""
        if not limitby:
            return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
        (lmin, lmax) = limitby
        # extend the caller's WHERE clause with the row-number lower bound
        if len(sql_w) > 1:
            sql_w_row = sql_w + ' AND w_row > %i' % lmin
        else:
            sql_w_row = 'WHERE w_row > %i' % lmin
        return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        match = self.REGEX_URI.match(ruri)
        if not match:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(match.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(match.group('password')) or ''
        host = match.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = match.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
4204
class CubridAdapter(MySQLAdapter):
    """DAL adapter for Cubrid (MySQL-compatible dialect), cubriddb driver.

    URI: cubrid://user:password@host[:port]/db[?set_encoding=charset]
    """
    drivers = ('cubriddb',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        # BUGFIX: decode the credentials exactly once.  The original code
        # decoded user/password a second time further down (corrupting
        # them for non-identity decoders) and bound the result to a
        # `passwd` local that the connector never used.
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # NOTE(review): charset is parsed from the URI but the cubriddb
        # connect() call below does not accept it -- currently unused.
        charset = m.group('charset') or 'utf8'
        def connector(host=host, port=port, db=db,
                      user=user, passwd=password, driver_args=driver_args):
            return self.driver.connect(host, port, db, user, passwd, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Cubrid follows MySQL semantics for these session settings.
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4252
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object whose contents live in a `web2py_filesystem` table.

    Used where the real filesystem is not writable (e.g. Google App
    Engine with Cloud SQL) to persist migration metadata (.table files).
    Only MySQL, PostgreSQL and SQLite backends are supported.
    """

    # flipped to True (on the class) once the backing table is known to exist
    web2py_filesystem = False

    def escape(self,obj):
        """Delegate SQL escaping to the owning adapter."""
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        """Open `filename` stored in `db`; mode is 'r', 'w', 'rw' or 'a'."""
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        # lazily create the backing table, once per process
        if not self.web2py_filesystem:
            dbengine = db._adapter.dbengine
            if dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif dbengine in ('postgres', 'sqlite'):
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p = 0
        self.data = ''
        if mode not in ('r','rw','a'):
            return
        rows = self.db.executesql(
            "SELECT content FROM web2py_filesystem WHERE path='%s'" % filename)
        if rows:
            self.data = rows[0][0]
        elif exists(filename):
            # fall back to a real on-disk file when one exists
            datafile = open(filename, 'r')
            try:
                self.data = datafile.read()
            finally:
                datafile.close()
        elif mode in ('r','rw'):
            raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        """Return up to `bytes` characters from the current position."""
        chunk = self.data[self.p:self.p+bytes]
        self.p += len(chunk)
        return chunk

    def readline(self):
        """Return the next line, keeping its trailing newline (if any)."""
        nl = self.data.find('\n',self.p)+1
        if nl>0:
            line, self.p = self.data[self.p:nl], nl
        else:
            line, self.p = self.data[self.p:], len(self.data)
        return line

    def write(self,data):
        """Append `data` to the in-memory buffer (flushed on close)."""
        self.data += data

    def close_connection(self):
        """Flush the buffer to the web2py_filesystem table and detach."""
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True if `filename` exists on disk or in the database table."""
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception as e:
            if not db._adapter.isOperationalError(e):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's file hooks to DatabaseStoredFile,
    so .table metadata is kept in the database instead of on disk."""

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db,filename)

    def file_open(self, filename, mode='rb', lock=True):
        # `lock` is accepted for interface compatibility only; database
        # access needs no file locking
        return DatabaseStoredFile(self.db,filename,mode)

    def file_close(self, fileobj):
        fileobj.close_connection()

    def file_delete(self,filename):
        sql = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(sql)
        self.db.commit()
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """Adapter for Google Cloud SQL (MySQL dialect) on App Engine.

    Migration metadata is stored in the database (UseDatabaseStoredFile)
    since the GAE filesystem is read-only; connections go through the
    rdbms module instead of a DB-API driver.
    """
    uploads_in_blob = True

    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        # work folder lives under $HOME on GAE; keep the app-relative part
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        match = self.REGEX_URI.match(ruri)
        if not match:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(match.group('instance'))
        self.dbstring = db = credential_decoder(match.group('db'))
        driver_args['instance'] = instance
        driver_args.setdefault('charset', 'utf8')
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # rdbms expects unicode commands
        return self.log_execute(command.decode('utf8'), *a, **b)
4400
class NoSQLAdapter(BaseAdapter):
    """Common base for the non-relational adapters (Google Datastore,
    MongoDB, CouchDB, IMAP, ...).

    Provides shared value coercion (represent), no-op transaction hooks,
    and stubs that raise SyntaxError for every SQL-only operation.
    """
    # NoSQL backends provide no row locking
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        # Normalize any value to unicode (Python 2: str is a byte string).
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        # Query matching every record of `table` (ids are always positive).
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Coerce the Python value `obj` into the value stored for
        `fieldtype`.

        `fieldtype` is a DAL type string ('integer', 'list:string', ...),
        an SQLCustomType, or (on the datastore) a gae.Property instance.
        An empty string in a non-text field is stored as None.
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()  # lazy defaults are evaluated at store time
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            if isinstance(fieldtype, gae.Property):
                return obj  # raw datastore property: stored as-is
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            # list fields always store a list, never a scalar
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        # '' means "no value" except in the text-like fields
        # (string, text, password, upload)
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                # a scalar field given a list: coerce element-wise
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                # references are stored as the referenced record's id
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # any value whose string form starts with '0' or 'F'/'f'
                # is False; everything else truthy is True
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # parse 'YYYY-MM-DD'
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # datetime is a date subclass: truncate to a date
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # parse 'HH[:MM[:SS]]', padding missing parts with 0
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # parse 'YYYY-MM-DD HH[:MM[:SS]]'
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)  # pad missing MM/SS
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass  # stored verbatim
            elif fieldtype == 'json':
                if isinstance(obj, basestring):
                    obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # The _insert/_count/_select/_delete/_update methods normally return
    # the SQL about to be executed; for NoSQL they return a readable
    # description of the operation instead (shown in db._lastsql).
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """No-op: most NoSQL backends have no transactions."""
        pass

    def rollback(self):
        """No-op: most NoSQL backends have no transactions."""
        pass

    def close_connection(self):
        """No-op: most NoSQL backends have no persistent connection."""
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4565
class GAEF(object):
    """One Google Datastore filter condition: field `name`, operator
    `op`, comparison `value`, and an `apply` callable that adds the
    filter to a datastore query."""

    def __init__(self, name, op, value, apply):
        # the datastore addresses the primary key as '__key__', not 'id'
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
4575
class GoogleDatastoreAdapter(NoSQLAdapter):
    # uploads are stored inside the datastore (GAE has no writable filesystem)
    uploads_in_blob = True
    # populated per-instance in __init__ because the gae module is only
    # importable inside the App Engine runtime
    types = {}

    # migration bookkeeping files do not exist on GAE: make them no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # extracts the optional datastore namespace from 'google:datastore://<ns>'
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Bind the adapter to the datastore.

        There is no real connection or pool on GAE; pool_size is forced
        to 0 and no driver is loaded.
        """
        # map DAL field types onto gae.Property factories
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = 0
        # optional namespace in the uri selects a datastore namespace
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
    def parse_id(self, value, field_type):
        # datastore ids are already plain integers: no parsing needed
        return value
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build the gae.Model subclass backing `table` (no DDL on GAE).

        polymodel may be None (plain gae.Model base), True (PolyModel
        base), or a parent Table whose _tableobj becomes the base class.
        """
        myfields = {}
        for field in table:
            # fields inherited from a parent polymodel are not redeclared
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                #this is custom properties to add to the GAE field declaration
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                ftype = field_type
            elif field_type.startswith('id'):
                # the id is the implicit datastore key, not a property
                continue
            elif field_type.startswith('decimal'):
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None
    def expand(self,expression,field_type=None):
        """Recursively translate a DAL expression; there is no SQL string
        on GAE, so operators return GAEF filter lists and fields return
        their names."""
        if isinstance(expression,Field):
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
    def AND(self,first,second):
        # both sides expand to lists of GAEF filters; conjunction is just
        # concatenation, but __key__ filters must come first for GAE
        a = self.expand(first)
        b = self.expand(second)
        if b[0].name=='__key__' and a[0].name!='__key__':
            return b+a
        return a+b
4702 - def EQ(self,first,second=None):
4703 if isinstance(second, Key): 4704 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4705 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4706
4707 - def NE(self,first,second=None):
4708 if first.type != 'id': 4709 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4710 else: 4711 if not second is None: 4712 second = Key.from_path(first._tablename, long(second)) 4713 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4714
4715 - def LT(self,first,second=None):
4716 if first.type != 'id': 4717 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4718 else: 4719 second = Key.from_path(first._tablename, long(second)) 4720 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4721
4722 - def LE(self,first,second=None):
4723 if first.type != 'id': 4724 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4725 else: 4726 second = Key.from_path(first._tablename, long(second)) 4727 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4728
4729 - def GT(self,first,second=None):
4730 if first.type != 'id' or second==0 or second == '0': 4731 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4732 else: 4733 second = Key.from_path(first._tablename, long(second)) 4734 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4735
4736 - def GE(self,first,second=None):
4737 if first.type != 'id': 4738 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4739 else: 4740 second = Key.from_path(first._tablename, long(second)) 4741 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4742
4743 - def INVERT(self,first):
4744 return '-%s' % first.name
4745
4746 - def COMMA(self,first,second):
4747 return '%s, %s' % (self.expand(first),self.expand(second))
4748
4749 - def BELONGS(self,first,second=None):
4750 if not isinstance(second,(list, tuple)): 4751 raise SyntaxError("Not supported") 4752 if first.type != 'id': 4753 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 4754 else: 4755 second = [Key.from_path(first._tablename, int(i)) for i in second] 4756 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4757
4758 - def CONTAINS(self,first,second,case_sensitive=False):
4759 # silently ignoring: GAE can only do case sensitive matches! 4760 if not first.type.startswith('list:'): 4761 raise SyntaxError("Not supported") 4762 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4763
4764 - def NOT(self,first):
4765 nops = { self.EQ: self.NE, 4766 self.NE: self.EQ, 4767 self.LT: self.GE, 4768 self.GT: self.LE, 4769 self.LE: self.GT, 4770 self.GE: self.LT} 4771 if not isinstance(first,Query): 4772 raise SyntaxError("Not suported") 4773 nop = nops.get(first.op,None) 4774 if not nop: 4775 raise SyntaxError("Not suported %s" % first.op.__name__) 4776 first.op = nop 4777 return self.expand(first)
4778
    def truncate(self,table,mode):
        # GAE has no TRUNCATE: delete every record of the table instead;
        # `mode` is accepted for interface compatibility and ignored
        self.db(self.db._adapter.id_query(table)).delete()
4782 - def select_raw(self,query,fields=None,attributes=None):
4783 db = self.db 4784 fields = fields or [] 4785 attributes = attributes or {} 4786 args_get = attributes.get 4787 new_fields = [] 4788 for item in fields: 4789 if isinstance(item,SQLALL): 4790 new_fields += item._table 4791 else: 4792 new_fields.append(item) 4793 fields = new_fields 4794 if query: 4795 tablename = self.get_table(query) 4796 elif fields: 4797 tablename = fields[0].tablename 4798 query = db._adapter.id_query(fields[0].table) 4799 else: 4800 raise SyntaxError("Unable to determine a tablename") 4801 4802 if query: 4803 if use_common_filters(query): 4804 query = self.common_filter(query,[tablename]) 4805 4806 #tableobj is a GAE Model class (or subclass) 4807 tableobj = db[tablename]._tableobj 4808 filters = self.expand(query) 4809 4810 projection = None 4811 if len(db[tablename].fields) == len(fields): 4812 #getting all fields, not a projection query 4813 projection = None 4814 elif args_get('projection') == True: 4815 projection = [] 4816 for f in fields: 4817 if f.type in ['text', 'blob', 'json']: 4818 raise SyntaxError( 4819 "text and blob field types not allowed in projection queries") 4820 else: 4821 projection.append(f.name) 4822 elif args_get('filterfields') == True: 4823 projection = [] 4824 for f in fields: 4825 projection.append(f.name) 4826 4827 # real projection's can't include 'id'. 
4828 # it will be added to the result later 4829 query_projection = [ 4830 p for p in projection if \ 4831 p != db[tablename]._id.name] if projection and \ 4832 args_get('projection') == True\ 4833 else None 4834 4835 cursor = None 4836 if isinstance(args_get('reusecursor'), str): 4837 cursor = args_get('reusecursor') 4838 items = gae.Query(tableobj, projection=query_projection, 4839 cursor=cursor) 4840 4841 for filter in filters: 4842 if args_get('projection') == True and \ 4843 filter.name in query_projection and \ 4844 filter.op in ['=', '<=', '>=']: 4845 raise SyntaxError( 4846 "projection fields cannot have equality filters") 4847 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4848 continue 4849 elif filter.name=='__key__' and filter.op=='=': 4850 if filter.value==0: 4851 items = [] 4852 elif isinstance(filter.value, Key): 4853 # key qeuries return a class instance, 4854 # can't use projection 4855 # extra values will be ignored in post-processing later 4856 item = tableobj.get(filter.value) 4857 items = (item and [item]) or [] 4858 else: 4859 # key qeuries return a class instance, 4860 # can't use projection 4861 # extra values will be ignored in post-processing later 4862 item = tableobj.get_by_id(filter.value) 4863 items = (item and [item]) or [] 4864 elif isinstance(items,list): # i.e. there is a single record! 4865 items = [i for i in items if filter.apply( 4866 getattr(item,filter.name),filter.value)] 4867 else: 4868 if filter.name=='__key__' and filter.op != 'in': 4869 items.order('__key__') 4870 items = items.filter('%s %s' % (filter.name,filter.op), 4871 filter.value) 4872 if not isinstance(items,list): 4873 if args_get('left', None): 4874 raise SyntaxError('Set: no left join in appengine') 4875 if args_get('groupby', None): 4876 raise SyntaxError('Set: no groupby in appengine') 4877 orderby = args_get('orderby', False) 4878 if orderby: 4879 ### THIS REALLY NEEDS IMPROVEMENT !!! 
4880 if isinstance(orderby, (list, tuple)): 4881 orderby = xorify(orderby) 4882 if isinstance(orderby,Expression): 4883 orderby = self.expand(orderby) 4884 orders = orderby.split(', ') 4885 for order in orders: 4886 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4887 items = items.order(order) 4888 if args_get('limitby', None): 4889 (lmin, lmax) = attributes['limitby'] 4890 (limit, offset) = (lmax - lmin, lmin) 4891 rows = items.fetch(limit,offset=offset) 4892 #cursor is only useful if there was a limit and we didn't return 4893 # all results 4894 if args_get('reusecursor'): 4895 db['_lastcursor'] = items.cursor() 4896 items = rows 4897 return (items, tablename, projection or db[tablename].fields)
4898
    def select(self,query,fields,attributes):
        """
        This is the GAE version of select. some notes to consider:
         - db['_lastsql'] is not set because there is not SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries. note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'filterfields' when set to True web2py will only
           parse the explicitly listed fields into the Rows object, even though
           all fields are returned in the query. This can be used to reduce
           memory usage in cases where true projection queries are not
           usable.
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute. Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query. The user must save the cursor value between
           requests, and the filters must be identical. It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        # the id column yields the entity itself; it is resolved by parse()
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
4932 - def count(self,query,distinct=None,limit=None):
4933 if distinct: 4934 raise RuntimeError("COUNT DISTINCT not supported") 4935 (items, tablename, fields) = self.select_raw(query) 4936 # self.db['_lastsql'] = self._count(query) 4937 try: 4938 return len(items) 4939 except TypeError: 4940 return items.count(limit=limit)
4941
    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.

        Returns the number of records deleted.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            #use a keys_only query to ensure that this runs as a datastore
            # small operations
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            # delete in batches of at most 1000 keys
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            gae.delete(items)
        return counter
    def update(self,tablename,query,update_fields):
        """Update matching records one entity at a time; returns the
        number of records updated."""
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        LOGGER.info(str(counter))
        return counter
    def insert(self,table,fields):
        """Insert one record; returns a Reference carrying the new id and
        the native gae key (stashed as _gaekey for later resolution)."""
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
        return rid
4986 - def bulk_insert(self,table,items):
4987 parsed_items = [] 4988 for item in items: 4989 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 4990 parsed_items.append(table._tableobj(**dfields)) 4991 gae.put(parsed_items) 4992 return True
4993
def uuid2int(uuidv):
    """Map a uuid string to its 128-bit integer value."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
def int2uuid(n):
    """Map a 128-bit integer back to its canonical uuid string."""
    u = uuid.UUID(int=n)
    return str(u)
class CouchDBAdapter(NoSQLAdapter):
    drivers = ('couchdb',)

    # uploads are stored inside documents, not on the filesystem
    uploads_in_blob = True
    # python-side types used when parsing values returned by couchdb
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    # migration bookkeeping files are meaningless here: make them no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass
    def expand(self,expression,field_type=None):
        # couchdb stores the primary key as '_id'; everything else uses
        # the generic SQL-ish expansion
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)
5036 - def AND(self,first,second):
5037 return '(%s && %s)' % (self.expand(first),self.expand(second))
5038
5039 - def OR(self,first,second):
5040 return '(%s || %s)' % (self.expand(first),self.expand(second))
5041
5042 - def EQ(self,first,second):
5043 if second is None: 5044 return '(%s == null)' % self.expand(first) 5045 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
5046
5047 - def NE(self,first,second):
5048 if second is None: 5049 return '(%s != null)' % self.expand(first) 5050 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
5051
5052 - def COMMA(self,first,second):
5053 return '%s + %s' % (self.expand(first),self.expand(second))
5054
    def represent(self, obj, fieldtype):
        """Serialize a python value into the javascript literal embedded
        in a couchdb view function."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            # ids are stored as strings of the integer value
            return repr(str(long(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        # plain strings: utf8-encode unicode before repr()
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))
    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to a couchdb server over HTTP."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # 'couchdb://host:port' -> 'http://host:port'
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)
5083 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
5084 if migrate: 5085 try: 5086 self.connection.create(table._tablename) 5087 except: 5088 pass
5089
    def insert(self,table,fields):
        """Insert one document; the id is derived from a fresh uuid so it
        is unique without a server round-trip.  Returns the new id."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id
    def _select(self,query,fields,attributes):
        """Build the javascript map function implementing the select.

        Returns (fn, colnames) where fn is the view source passed to
        couchdb and colnames the 'table.field' labels for parsing.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            # map the DAL 'id' name onto couchdb's '_id'
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and long(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
    def select(self,query,fields,attributes):
        """Run the generated view function and parse rows into a Rows object."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
5135 - def delete(self,tablename,query):
5136 if not isinstance(query,Query): 5137 raise SyntaxError("Not Supported") 5138 if query.first.type=='id' and query.op==self.EQ: 5139 id = query.second 5140 tablename = query.first.tablename 5141 assert(tablename == query.first.tablename) 5142 ctable = self.connection[tablename] 5143 try: 5144 del ctable[str(id)] 5145 return 1 5146 except couchdb.http.ResourceNotFound: 5147 return 0 5148 else: 5149 tablename = self.get_table(query) 5150 rows = self.select(query,[self.db[tablename]._id],{}) 5151 ctable = self.connection[tablename] 5152 for row in rows: 5153 del ctable[str(row.id)] 5154 return len(rows)
5155
    def update(self,tablename,query,fields):
        """Update matching documents; returns the number updated.

        Fast path: a 'table.id == value' query updates directly by key.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)
5183 - def count(self,query,distinct=None):
5184 if distinct: 5185 raise RuntimeError("COUNT DISTINCT not supported") 5186 if not isinstance(query,Query): 5187 raise SyntaxError("Not Supported") 5188 tablename = self.get_table(query) 5189 rows = self.select(query,[self.db[tablename]._id],{}) 5190 return len(rows)
5191
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
class MongoDBAdapter(NoSQLAdapter):
    # mongodb stores json natively
    native_json = True
    drivers = ('pymongo',)

    # uploads are stored inside documents, not on the filesystem
    uploads_in_blob = True

    # python-side types used when parsing values returned by mongo
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to a mongodb server; the database name is taken from
        the uri and is mandatory."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        #this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performand asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe',True)

        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")

        def connector(uri=self.uri,m=m):
            # Connection() is deprecated
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]

        self.reconnect(connector,cursor=False)
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts an ObjectId (returned unchanged), a decimal or base-16
        string, an integer, or the literal "<random>"; raises
        ValueError/TypeError otherwise.
        """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                arg = int("0x%sL" % \
                    "".join([self.random.choice("0123456789abcdef") \
                                 for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                        "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        if arg == 0:
            hexvalue = "".zfill(24)
        else:
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
    def parse_reference(self, value, field_type):
        # here we have to check for ObjectID before base parse:
        # mongo returns ObjectId instances, the DAL wants integers
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_reference(value, field_type)
    def parse_id(self, value, field_type):
        # ObjectId hex digests map back onto DAL integer ids
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_id(value, field_type)
5331 - def represent(self, obj, fieldtype):
5332 # the base adatpter does not support MongoDB ObjectId 5333 if isinstance(obj, self.ObjectId): 5334 value = obj 5335 else: 5336 value = NoSQLAdapter.represent(self, obj, fieldtype) 5337 # reference types must be convert to ObjectID 5338 if fieldtype =='date': 5339 if value == None: 5340 return value 5341 # this piece of data can be stripped off based on the fieldtype 5342 t = datetime.time(0, 0, 0) 5343 # mongodb doesn't has a date object and so it must datetime, 5344 # string or integer 5345 return datetime.datetime.combine(value, t) 5346 elif fieldtype == 'time': 5347 if value == None: 5348 return value 5349 # this piece of data can be stripped of based on the fieldtype 5350 d = datetime.date(2000, 1, 1) 5351 # mongodb doesn't has a time object and so it must datetime, 5352 # string or integer 5353 return datetime.datetime.combine(d, value) 5354 elif fieldtype == "blob": 5355 from bson import Binary 5356 if not isinstance(value, Binary): 5357 return Binary(value) 5358 return value 5359 elif (isinstance(fieldtype, basestring) and 5360 fieldtype.startswith('list:')): 5361 if fieldtype.startswith('list:reference'): 5362 newval = [] 5363 for v in value: 5364 newval.append(self.object_id(v)) 5365 return newval 5366 return value 5367 elif ((isinstance(fieldtype, basestring) and 5368 fieldtype.startswith("reference")) or 5369 (isinstance(fieldtype, Table)) or fieldtype=="id"): 5370 value = self.object_id(value) 5371 return value
5372
    def create_table(self, table, migrate=True, fake_migrate=False,
                     polymodel=None, isCapped=False):
        # mongodb creates collections lazily on first write: nothing to
        # do here unless a capped collection is requested (unsupported)
        if isCapped:
            raise RuntimeError("Not implemented")
    def count(self, query, distinct=None, snapshot=True):
        """Count matching documents via a counting select."""
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        return long(self.select(query,[self.db[tablename]._id], {},
                                count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefor call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Recursively translate a DAL expression into the mongo query
        dict / field-name form used by pymongo."""
        if isinstance(expression, Query):
            # any query using 'id':
            #   set name as _id (as per pymongo/mongodb primary key)
            #   convert second arg to an objectid field
            #   (if its not already)
            #   if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5434 - def drop(self, table, mode=''):
5435 ctable = self.connection[table._tablename] 5436 ctable.drop()
5437
5438 - def truncate(self, table, mode, safe=None):
5439 if safe == None: 5440 safe=self.safe 5441 ctable = self.connection[table._tablename] 5442 ctable.remove(None, safe=True)
5443
    def _select(self, query, fields, attributes):
        """Translate (query, fields, attributes) into pymongo find() args.

        Returns (tablename, query_dict, fields_dict, sort_list, limit,
        skip).  Only 'limitby', 'orderby' and 'for_update' attributes are
        understood; anything else is warned about and ignored.
        """
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' marks a descending sort key
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))
        if limitby:
            limitby_skip, limitby_limit = limitby[0], int(limitby[1])
        else:
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
def select(self, query, fields, attributes, count=False,
           snapshot=False):
    """Run the query through pymongo and return parsed web2py rows.

    With count=True a {'count': n} dict is returned instead of rows.
    The Mongo-reserved '_id' column is renamed to 'id' for callers.
    """
    # TODO: support joins
    tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
        limitby_limit, limitby_skip = self._select(query, fields, attributes)
    ctable = self.connection[tablename]

    if count:
        return {'count' : ctable.find(
                mongoqry_dict, mongofields_dict,
                skip=limitby_skip, limit=limitby_limit,
                sort=mongosort_list, snapshot=snapshot).count()}
    else:
        # pymongo cursor object
        mongo_list_dicts = ctable.find(mongoqry_dict,
                                       mongofields_dict, skip=limitby_skip,
                                       limit=limitby_limit, sort=mongosort_list,
                                       snapshot=snapshot)
        rows = []
        # populate row in proper order
        # Here we replace ._id with .id to follow the standard naming
        colnames = []
        newnames = []
        for field in fields:
            colname = str(field)
            colnames.append(colname)
            tablename, fieldname = colname.split(".")
            if fieldname == "_id":
                # Mongodb reserved uuid key
                # NOTE(review): this mutates the shared Field object's name
                field.name = "id"
            newnames.append(".".join((tablename, field.name)))

        for record in mongo_list_dicts:
            row=[]
            for colname in colnames:
                tablename, fieldname = colname.split(".")
                # switch back to Mongo's _id key when reading record ids
                if fieldname == "id": fieldname = "_id"
                if fieldname in record:
                    value = record[fieldname]
                else:
                    value = None
                row.append(value)
            rows.append(row)

        processor = attributes.get('processor', self.parse)
        result = processor(rows, fields, newnames, False)
        return result
5546
5547 - def _insert(self, table, fields):
5548 values = dict() 5549 for k, v in fields: 5550 if not k.name in ["id", "safe"]: 5551 fieldname = k.name 5552 fieldtype = table[k.name].type 5553 values[fieldname] = self.represent(v, fieldtype) 5554 return values
5555 5556 # Safe determines whether a asynchronious request is done or a 5557 # synchronious action is done 5558 # For safety, we use by default synchronous requests
def insert(self, table, fields, safe=None):
    """Insert one document and return its id as a long integer.

    `safe` selects synchronous (True) vs asynchronous (False) write
    acknowledgement; defaults to the adapter-wide self.safe.
    """
    if safe==None:
        safe = self.safe
    ctable = self.connection[table._tablename]
    values = self._insert(table, fields)
    # pymongo adds the generated ObjectId to `values` as '_id' in place
    ctable.insert(values, safe=safe)
    # expose the 24-hex-digit ObjectId as a (large) integer id
    return long(str(values['_id']), 16)
5566 5567 #this function returns a dict with the where clause and update fields
def _update(self, tablename, query, fields):
    """Return the (modify, filter) pair for a pymongo update call.

    *modify* is a {'$set': {...}} document; *filter* is the expanded
    where-clause (or None).  Only Query instances are supported.
    """
    if not isinstance(query, Query):
        raise SyntaxError("Not Supported")
    where = self.expand(query) if query else None
    changes = {}
    for field, value in fields:
        # never rewrite id fields: it would trigger backend errors
        if field.name in ("_id", "id"):
            continue
        changes[field.name] = self.represent(value, field.type)
    return {'$set': changes}, where
5578
def update(self, tablename, query, fields, safe=None):
    """Update all documents matching *query*.

    Returns the number of affected rows, falling back to a pre-update
    count when the server result does not report one.
    """
    if safe == None:
        safe = self.safe
    # return amount of adjusted rows or zero, but no exceptions
    # raised for not finding the result
    if not isinstance(query, Query):
        raise RuntimeError("Not implemented")
    # count matching documents up front as the fallback return value
    amount = self.count(query, False)
    modify, filter = self._update(tablename, query, fields)
    try:
        result = self.connection[tablename].update(filter,
                       modify, multi=True, safe=safe)
        if safe:
            try:
                # if the server reports an affected-row count, prefer it
                return result["n"]
            except (KeyError, AttributeError, TypeError):
                return amount
        else:
            return amount
    except Exception, e:
        # TODO reverse the update query to verify that it succeeded
        raise RuntimeError("uncaught exception when updating rows: %s" % e)
5602
def _delete(self, tablename, query):
    """Expand *query* into a pymongo filter document.

    Only Query instances are supported; anything else raises.
    """
    if not isinstance(query, Query):
        raise RuntimeError("query type %s is not supported" % \
                           type(query))
    return self.expand(query)
5608
def delete(self, tablename, query, safe=None):
    """Delete every matching document; returns the pre-delete match count.

    MongoDB's remove() does not report how many documents went away, so
    the count is taken before removal.
    """
    if safe is None:
        safe = self.safe
    amount = self.count(query, False)
    where = self._delete(tablename, query)
    self.connection[tablename].remove(where, safe=safe)
    return amount
5617
def bulk_insert(self, table, items):
    """Insert each item in turn (no native bulk op); returns the new ids."""
    new_ids = []
    for item in items:
        new_ids.append(self.insert(table, item))
    return new_ids
5620 5621 ## OPERATORS
def INVERT(self, first):
    """Descending-sort marker: prefix the expanded field name with '-'."""
    return '-%s' % self.expand(first)
5625 5626 # TODO This will probably not work:(
def NOT(self, first):
    """Negate a sub-query via Mongo's $not operator.

    TODO: $not has restrictions in MongoDB; this may not cover all cases.
    """
    return {'$not': self.expand(first)}
5631
def AND(self, first, second):
    """Merge both operand documents (MongoDB's implicit AND).

    NOTE: a key present in both operands is overwritten by the second,
    exactly as in the original implementation.
    """
    merged = self.expand(first)
    merged.update(self.expand(second))
    return merged
5637
def OR(self, first, second):
    """Disjunction: pymongo expects {'$or': [doc1, doc2]}."""
    return {'$or': [self.expand(first), self.expand(second)]}
5645
def BELONGS(self, first, second):
    """Membership test translated to Mongo's $in operator."""
    if isinstance(second, str):
        # assumes a trailing-delimiter string form — TODO confirm callers
        return {self.expand(first): {"$in": [second[:-1]]}}
    if second == [] or second == () or second == set():
        # an empty collection can never match: always-false filter
        return {1: 0}
    candidates = [self.expand(item, first.type) for item in second]
    return {self.expand(first): {"$in": candidates}}
5653
def EQ(self,first,second):
    """Equality filter: {field: value}."""
    return {self.expand(first): self.expand(second)}
5658
def NE(self, first, second=None):
    """Inequality filter via $ne."""
    return {self.expand(first): {'$ne': self.expand(second)}}
5663
def LT(self,first,second=None):
    """Less-than filter via $lt; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return {self.expand(first): {'$lt': self.expand(second)}}
5670
def LE(self,first,second=None):
    """Less-or-equal filter via $lte; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return {self.expand(first): {'$lte': self.expand(second)}}
5677
def GT(self,first,second):
    """Greater-than filter via $gt (no None guard, matching the original)."""
    return {self.expand(first): {'$gt': self.expand(second)}}
5682
def GE(self,first,second=None):
    """Greater-or-equal filter via $gte; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return {self.expand(first): {'$gte': self.expand(second)}}
5689
def ADD(self, first, second):
    """Query-side addition would require server-side JavaScript; unsupported.

    Fix: the original carried an unreachable SQL-style return after the
    raise; the dead code has been removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5694
def SUB(self, first, second):
    """Query-side subtraction would require server-side JavaScript; unsupported.

    Fix: removed the unreachable SQL-style return after the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5699
def MUL(self, first, second):
    """Query-side multiplication would require server-side JavaScript; unsupported.

    Fix: removed the unreachable SQL-style return after the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5704
def DIV(self, first, second):
    """Query-side division would require server-side JavaScript; unsupported.

    Fix: removed the unreachable SQL-style return after the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5709
def MOD(self, first, second):
    """Query-side modulo would require server-side JavaScript; unsupported.

    Fix: removed the unreachable SQL-style return after the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5714
def AS(self, first, second):
    """Column aliasing (AS) is not supported on MongoDB.

    Fix: removed the unreachable SQL-style return after the raise.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
# We could implement an option that simulates a full-featured SQL
# database. But I think the option should be set explicitly or
# implemented as another library.
def ON(self, first, second):
    """Joins (ON) are not possible on a NoSQL backend.

    Fix: removed the unreachable SQL-style return after the raise.
    """
    raise NotImplementedError("This is not possible in NoSQL" +
                              " but can be simulated with a wrapper.")
# BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
# WHICH ONE IS BEST?
def COMMA(self, first, second):
    """SQL-style comma join of two expansions (kept for interface parity)."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s, %s' % (left, right)
5732
def LIKE(self, first, second):
    """First LIKE variant: '%' wildcards become '/'.

    NOTE(review): shadowed by the later LIKE definition in this class;
    regex metacharacters are not escaped here.
    """
    pattern = self.expand(second, 'string').replace('%', '/')
    return {self.expand(first): '%s' % pattern}
5737
def STARTSWITH(self, first, second):
    """First STARTSWITH variant: literal '/^…/' regex string.

    NOTE(review): shadowed by the later STARTSWITH definition;
    regex metacharacters are not escaped here.
    """
    prefix = self.expand(second, 'string')
    return {self.expand(first): '/^%s/' % prefix}
5742
def ENDSWITH(self, first, second):
    """First ENDSWITH variant as a '/…$/' literal regex string.

    Bug fix: the original emitted '/%s^/' — but '^' anchors the START of
    a string, so the pattern could never match a suffix.  '$' is the
    end-of-string anchor.  (This definition is shadowed by the later
    ENDSWITH in this class, which already anchors correctly with '$'.)
    Regex metacharacters are still not escaped here, as before.
    """
    return {self.expand(first): ('/%s$/' % \
            self.expand(second, 'string'))}
5747
def CONTAINS(self, first, second, case_sensitive=False):
    """First CONTAINS variant; ``case_sensitive`` is silently ignored
    (only case-sensitive matching is performed).

    An ObjectId operand is matched exactly; anything else becomes a
    '.*needle.*' $regex with the needle escaped.
    NOTE(review): shadowed by the later CONTAINS definition.
    """
    if isinstance(second, self.ObjectId):
        needle = second
    else:
        needle = {'$regex': ".*" + re.escape(self.expand(second, 'string')) + ".*"}
    return {self.expand(first): needle}
5755
def LIKE(self, first, second):
    """Translate SQL LIKE into a $regex: '%' wildcards become '.*'.

    NOTE(review): re.escape runs before the '%' substitution; on Python 2
    re.escape escapes '%' too, which would corrupt the wildcard — confirm
    the intended semantics.
    """
    import re
    escaped = re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': escaped.replace('%', '.*')}}
#TODO verify full compatibility with the official SQL LIKE operator
def STARTSWITH(self, first, second):
    """Prefix match: $regex anchored at the beginning with '^'."""
    import re
    anchored = '^' + re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': anchored}}
#TODO verify full compatibility with the official SQL LIKE operator
def ENDSWITH(self, first, second):
    """Suffix match: $regex anchored at the end with '$'.

    TODO (carried over): an unanchored-prefix edge case remains — e.g.
    searching endswith('a') also matches names merely containing 'a$'-like
    escapes; verify against the official SQL LIKE semantics.
    """
    import re
    anchored = re.escape(self.expand(second, 'string')) + '$'
    return {self.expand(first): {'$regex': anchored}}
#TODO verify full compatibility with the official Oracle CONTAINS operator
def CONTAINS(self, first, second, case_sensitive=False):
    """Substring match via a '.*needle.*' $regex.

    ``case_sensitive`` is silently ignored: only case-sensitive matching
    is performed (MongoDB regex is case-sensitive by default).
    """
    needle = re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': '.*' + needle + '.*'}}
5788
class IMAPAdapter(NoSQLAdapter):
    # candidate driver module names probed by find_driver()
    drivers = ('imaplib',)

    # NOTE(review): because `drivers` precedes it, the string below is a
    # plain no-op expression statement, not the class __doc__.
    """ IMAP server adapter

    This class is intended as an interface with
    email IMAP servers to perform simple queries in the
    web2py DAL query syntax, so email read, search and
    other related IMAP mail services (as those implemented
    by brands like Google(r), and Yahoo!(r))
    can be managed from web2py applications.

    The code uses examples by Yuji Tomita on this post:
    http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based on the docs for Python imaplib, python email
    and email IETF's (i.e. RFC2060 and RFC3501)

    This adapter was tested with a small set of operations with Gmail(r).
    Other services' requests could raise command syntax and response data
    issues.

    It creates its table and field names "statically",
    meaning that the developer should leave the table and field
    definitions to the DAL instance by calling the adapter's
    .define_tables() method. The tables are defined with the
    IMAP server mailbox list information.

    .define_tables() returns a dictionary mapping dal tablenames
    to the server mailbox names with the following structure:

    {<tablename>: str <server mailbox name>}

    Supported fields:

    Field        Type          Description
    ################################################################
    uid          string
    answered     boolean       Flag
    created      date
    content      list:string   A list of text or html parts
    to           string
    cc           string
    bcc          string
    size         integer       the amount of octets of the message*
    deleted      boolean       Flag
    draft        boolean       Flag
    flagged      boolean       Flag
    sender       string
    recent       boolean       Flag
    seen         boolean       Flag
    subject      string
    mime         string        The mime header declaration
    email        string        The complete RFC822 message**
    attachments  list          Each non-text part as a dict
    encoding     string        The main detected encoding

    *At the application side it is measured as the length of the RFC822
    message string

    WARNING: As row ids are mapped to email sequence numbers,
    make sure your imap client web2py app does not delete messages
    during select or update actions, to prevent updating or
    deleting different messages.
    Sequence numbers change whenever the mailbox is updated.
    To avoid these sequence number issues, it is recommended to use
    uid fields in query references (although the update-and-delete-in-
    separate-actions rule still applies).

    # This is the code recommended to start imap support
    # at the app's model:

    imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
    imapdb.define_tables()

    Here is an (incomplete) list of possible imap commands:

    # Count today's unseen messages
    # smaller than 6000 octets from the
    # inbox mailbox

    q = imapdb.INBOX.seen == False
    q &= imapdb.INBOX.created == datetime.date.today()
    q &= imapdb.INBOX.size < 6000
    unread = imapdb(q).count()

    # Fetch last query messages
    rows = imapdb(q).select()

    # it is also possible to filter query select results with limitby and
    # sequences of mailbox fields

    set.select(<fields sequence>, limitby=(<int>, <int>))

    # Mark last query messages as seen
    messages = [row.uid for row in rows]
    seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)

    # Delete messages in the imap database that have mails from mr. Gumby

    deleted = 0
    for mailbox in imapdb.tables:
        deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()

    # It is also possible to mark messages for deletion instead of erasing
    # them directly with set.update(deleted=True)

    # This object gives access
    # to the adapter's auto mailbox
    # mapped names (which native
    # mailbox has what table name)

    imapdb.mailboxes <dict> # tablename, server native name pairs

    # To retrieve a table's native mailbox name use:
    imapdb.<table>.mailbox

    ### New features v2.4.1:

    # Declare mailboxes statically with tablename, name pairs
    # This avoids the extra server names retrieval

    imapdb.define_tables({"inbox": "INBOX"})

    # Selects without content/attachments/email columns will only
    # fetch header and flags

    imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
    """

    # DAL type name -> Python type used by the static table definitions
    types = {
                'string': str,
                'text': str,
                'date': datetime.date,
                'datetime': datetime.datetime,
                'id': long,
                'boolean': bool,
                'integer': int,
                'bigint': long,
                'blob': str,
                'list:string': str,
        }

    dbengine = 'imap'

    # connection URI shape: user[:password]@host[:port]
    REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=IDENTITY,
                 driver_args={},
                 adapter_args={},
                 do_connect=True,
                 after_connection=None):
        """Build the IMAP adapter and (optionally) connect.

        db uri: user@example.com:password@imap.server.com:123
        NOTE(review): driver_args/adapter_args use mutable dict defaults
        shared across calls — confirm no caller mutates them.
        """
        # TODO: max size adapter argument for preventing large mail transfers

        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        self.mailbox_size = None
        self.static_names = None
        self.charset = sys.getfilesystemencoding()
        # imap class (IMAP4 or IMAP4_SSL), chosen lazily in connector()
        self.imap4 = None
        # strip the "imap://" scheme prefix before parsing credentials
        uri = uri.split("://")[1]

        """ MESSAGE is an identifier for sequence number"""

        self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
                      '\\Recent', '\\Seen', '\\Answered']
        # DAL field name -> IMAP search keyword (None: not searchable)
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }

        db['_lastsql'] = ''

        m = self.REGEX_URI.match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        # NOTE(review): the port group is optional in REGEX_URI but
        # int(None) would raise here — URIs must include a port; confirm.
        port = int(m.group('port'))
        over_ssl = False
        # port 993 conventionally implies IMAP over SSL
        if port==993:
            over_ssl = True

        driver_args.update(host=host,port=port, password=password, user=user)
        def connector(driver_args=driver_args):
            # it is assumed authentication always succeeds
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])

            # static mailbox list (filled lazily by get_mailboxes)
            connection.mailbox_names = None

            # dummy cursor function: imaplib has no cursor concept but the
            # pooling machinery expects one
            connection.cursor = lambda : True

            return connection

        self.db.define_tables = self.define_tables
        self.connector = connector
        if do_connect: self.reconnect()
6018
    def reconnect(self, f=None, cursor=True):
        """
        IMAP4 Pool connection method

        The imap connection lacks a cursor command, so a custom
        replacement is provided for connection pooling, to prevent an
        uncaught remote session closing.
        """
        # already connected: nothing to do
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            # pooling disabled: open a fresh connection
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            POOLS = ConnectionPool.POOLS
            uri = self.uri
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection if one is available
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # probe: check the connection is alive
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                    break
                else:
                    # pool empty: release the lock and open a new one
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
6063
6064 - def get_last_message(self, tablename):
6065 last_message = None 6066 # request mailbox list to the server 6067 # if needed 6068 if not isinstance(self.connection.mailbox_names, dict): 6069 self.get_mailboxes() 6070 try: 6071 result = self.connection.select(self.connection.mailbox_names[tablename]) 6072 last_message = int(result[1][0]) 6073 except (IndexError, ValueError, TypeError, KeyError): 6074 e = sys.exc_info()[1] 6075 LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) 6076 return last_message
6077
6078 - def get_uid_bounds(self, tablename):
6079 if not isinstance(self.connection.mailbox_names, dict): 6080 self.get_mailboxes() 6081 # fetch first and last messages 6082 # return (first, last) messages uid's 6083 last_message = self.get_last_message(tablename) 6084 result, data = self.connection.uid("search", None, "(ALL)") 6085 uid_list = data[0].strip().split() 6086 if len(uid_list) <= 0: 6087 return None 6088 else: 6089 return (uid_list[0], uid_list[-1])
6090
6091 - def convert_date(self, date, add=None):
6092 if add is None: 6093 add = datetime.timedelta() 6094 """ Convert a date object to a string 6095 with d-Mon-Y style for IMAP or the inverse 6096 case 6097 6098 add <timedelta> adds to the date object 6099 """ 6100 months = [None, "JAN","FEB","MAR","APR","MAY","JUN", 6101 "JUL", "AUG","SEP","OCT","NOV","DEC"] 6102 if isinstance(date, basestring): 6103 # Prevent unexpected date response format 6104 try: 6105 dayname, datestring = date.split(",") 6106 date_list = datestring.strip().split() 6107 year = int(date_list[2]) 6108 month = months.index(date_list[1].upper()) 6109 day = int(date_list[0]) 6110 hms = map(int, date_list[3].split(":")) 6111 return datetime.datetime(year, month, day, 6112 hms[0], hms[1], hms[2]) + add 6113 except (ValueError, AttributeError, IndexError), e: 6114 LOGGER.error("Could not parse date text: %s. %s" % 6115 (date, e)) 6116 return None 6117 elif isinstance(date, (datetime.datetime, datetime.date)): 6118 return (date + add).strftime("%d-%b-%Y") 6119 else: 6120 return None
6121 6122 @staticmethod
6123 - def header_represent(f, r):
6124 from email.header import decode_header 6125 text, encoding = decode_header(f)[0] 6126 if encoding: 6127 text = text.decode(encoding).encode('utf-8') 6128 return text
6129
    def encode_text(self, text, charset, errors="replace"):
        """ Convert mail text to a utf-8 encoded byte string.

        None becomes the empty string; a str is decoded using *charset*
        (falling back to utf-8) with the given *errors* policy; any other
        type raises.
        """
        if text is None:
            text = ""
        else:
            if isinstance(text, str):
                if charset is None:
                    text = unicode(text, "utf-8", errors)
                else:
                    text = unicode(text, charset, errors)
            else:
                raise Exception("Unsupported mail text type %s" % type(text))
        return text.encode("utf-8")
6143
6144 - def get_charset(self, message):
6145 charset = message.get_content_charset() 6146 return charset
6147
    def get_mailboxes(self):
        """ Query the mail database for mailbox names.

        Fills self.connection.mailbox_names (sanitized name -> native
        name) and returns the list of sanitized names.
        """
        if self.static_names:
            # statically defined mailbox names: no server round trip
            self.connection.mailbox_names = self.static_names
            return self.static_names.keys()

        mailboxes_list = self.connection.list()
        self.connection.mailbox_names = dict()
        mailboxes = list()
        x = 0
        for item in mailboxes_list[1]:
            x = x + 1
            item = item.strip()
            # skip mailboxes flagged \Noselect (cannot be opened)
            if not "NOSELECT" in item.upper():
                sub_items = item.split("\"")
                sub_items = [sub_item for sub_item in sub_items \
                    if len(sub_item.strip()) > 0]
                # mailbox = sub_items[len(sub_items) -1]
                # the quoted native mailbox name is the last token
                mailbox = sub_items[-1]
                # remove unwanted characters and store original names
                # Don't allow leading non alphabetic characters
                mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
                mailboxes.append(mailbox_name)
                self.connection.mailbox_names[mailbox_name] = mailbox

        return mailboxes
6175
6176 - def get_query_mailbox(self, query):
6177 nofield = True 6178 tablename = None 6179 attr = query 6180 while nofield: 6181 if hasattr(attr, "first"): 6182 attr = attr.first 6183 if isinstance(attr, Field): 6184 return attr.tablename 6185 elif isinstance(attr, Query): 6186 pass 6187 else: 6188 return None 6189 else: 6190 return None 6191 return tablename
6192
6193 - def is_flag(self, flag):
6194 if self.search_fields.get(flag, None) in self.flags: 6195 return True 6196 else: 6197 return False
6198
    def define_tables(self, mailbox_names=None):
        """
        Auto create common IMAP fields.

        This function creates field definitions "statically",
        meaning that custom fields as in other adapters should
        not be supported and definitions handled on a service/mode
        basis (local syntax for Gmail(r), Ymail(r))

        Returns a dictionary with tablename, server native mailbox name
        pairs.
        """
        if mailbox_names:
            # optional statically declared mailboxes
            self.static_names = mailbox_names
        else:
            self.static_names = None
        # fetch the mailbox list from the server unless already cached
        if not isinstance(self.connection.mailbox_names, dict):
            self.get_mailboxes()

        names = self.connection.mailbox_names.keys()

        for name in names:
            self.db.define_table("%s" % name,
                Field("uid", "string", writable=False),
                Field("answered", "boolean"),
                Field("created", "datetime", writable=False),
                Field("content", "list:string", writable=False),
                Field("to", "string", writable=False),
                Field("cc", "string", writable=False),
                Field("bcc", "string", writable=False),
                Field("size", "integer", writable=False),
                Field("deleted", "boolean"),
                Field("draft", "boolean"),
                Field("flagged", "boolean"),
                Field("sender", "string", writable=False),
                Field("recent", "boolean", writable=False),
                Field("seen", "boolean"),
                Field("subject", "string", writable=False),
                Field("mime", "string", writable=False),
                Field("email", "string", writable=False, readable=False),
                Field("attachments", list, writable=False, readable=False),
                Field("encoding", writable=False)
                )

            # Set a special _mailbox attribute for storing
            # native mailbox names
            self.db[name].mailbox = \
                self.connection.mailbox_names[name]

            # decode quoted-printable headers on display
            self.db[name].to.represent = self.db[name].cc.represent = \
                self.db[name].bcc.represent = self.db[name].sender.represent = \
                self.db[name].subject.represent = self.header_represent

        # Set the db instance mailbox collections
        self.db.mailboxes = self.connection.mailbox_names
        return self.db.mailboxes
6257
6258 - def create_table(self, *args, **kwargs):
6259 # not implemented 6260 # but required by DAL 6261 pass
6262
    def _select(self, query, fields, attributes):
        """Return the textual IMAP search form of *query*, applying the
        common filter when enabled."""
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])
        return str(query)
6267
    def select(self, query, fields, attributes):
        """ Search and Fetch records and return web2py rows.

        Pipeline: SELECT the mailbox read-only, UID SEARCH with the
        textual query, UID FETCH each hit (headers only unless
        content/size/attachments/email columns are requested), then map
        the parsed messages onto the requested columns.
        """
        # move this statement elsewhere (upper-level)
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])

        import email
        # get records from imap server with search + fetch
        # convert results to a dictionary
        tablename = None
        fetch_results = list()

        if isinstance(query, Query):
            tablename = self.get_table(query)
            mailbox = self.connection.mailbox_names.get(tablename, None)
            if mailbox is None:
                # NOTE(review): this reports `mailbox` (always None here)
                # instead of the offending tablename — confirm intent
                raise ValueError("Mailbox name not found: %s" % mailbox)
            else:
                # select with readonly
                result, selected = self.connection.select(mailbox, True)
                if result != "OK":
                    raise Exception("IMAP error: %s" % selected)
                self.mailbox_size = int(selected[0])
                search_query = "(%s)" % str(query).strip()
                search_result = self.connection.uid("search", None, search_query)
                # Normal IMAP response OK is assumed (change this)
                if search_result[0] == "OK":
                    # For "light" remote server responses just get the first
                    # ten records (change for non-experimental implementation)
                    # However, light responses are not guaranteed with this
                    # approach, just fewer messages.
                    limitby = attributes.get('limitby', None)
                    messages_set = search_result[1][0].split()
                    # descending order
                    messages_set.reverse()
                    if limitby is not None:
                        # TODO: orderby, asc/desc, limitby from complete message set
                        messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                    # keep the requests small for header/flags-only selects
                    if any([(field.name in ["content", "size",
                                            "attachments", "email"]) for
                            field in fields]):
                        imap_fields = "(RFC822 FLAGS)"
                    else:
                        imap_fields = "(RFC822.HEADER FLAGS)"

                    if len(messages_set) > 0:
                        # create fetch results object list
                        # fetch each remote message and store it in memory
                        # (change to multi-fetch command syntax for faster
                        # transactions)
                        for uid in messages_set:
                            # fetch the RFC822 message body
                            typ, data = self.connection.uid("fetch", uid, imap_fields)
                            if typ == "OK":
                                fr = {"message": int(data[0][0].split()[0]),
                                      "uid": long(uid),
                                      "email": email.message_from_string(data[0][1]),
                                      "raw_message": data[0][1]}
                                fr["multipart"] = fr["email"].is_multipart()
                                # fetch flags for the message
                                fr["flags"] = self.driver.ParseFlags(data[1])
                                fetch_results.append(fr)
                            else:
                                # error retrieving the message body
                                raise Exception("IMAP error retrieving the body: %s" % data)
                else:
                    raise Exception("IMAP search error: %s" % search_result[1])
        elif isinstance(query, (Expression, basestring)):
            raise NotImplementedError()
        else:
            raise TypeError("Unexpected query type")

        imapqry_dict = {}
        imapfields_dict = {}

        # a bare select() or select(SQLALL) means "all supported fields"
        if len(fields) == 1 and isinstance(fields[0], SQLALL):
            allfields = True
        elif len(fields) == 0:
            allfields = True
        else:
            allfields = False
        if allfields:
            colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
        else:
            colnames = ["%s.%s" % (tablename, field.name) for field in fields]

        for k in colnames:
            imapfields_dict[k] = k

        imapqry_list = list()
        imapqry_array = list()
        for fr in fetch_results:
            attachments = []
            content = []
            size = 0
            n = int(fr["message"])
            item_dict = dict()
            message = fr["email"]
            uid = fr["uid"]
            charset = self.get_charset(message)
            flags = fr["flags"]
            raw_message = fr["raw_message"]
            # Return messages data mapping static fields
            # and fetched results. Mapping should be made
            # outside the select function (with auxiliary
            # instance methods)

            # pending: search flag states through the email message
            # instances for correct output

            # preserve subject encoding (ASCII/quoted printable)

            if "%s.id" % tablename in colnames:
                item_dict["%s.id" % tablename] = n
            if "%s.created" % tablename in colnames:
                item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
            if "%s.uid" % tablename in colnames:
                item_dict["%s.uid" % tablename] = uid
            if "%s.sender" % tablename in colnames:
                # If there is no encoding found in the message header
                # force utf-8 replacing characters (change this to
                # module's defaults). Applies to .sender, .to, .cc and .bcc
                item_dict["%s.sender" % tablename] = message["From"]
            if "%s.to" % tablename in colnames:
                item_dict["%s.to" % tablename] = message["To"]
            if "%s.cc" % tablename in colnames:
                if "Cc" in message.keys():
                    item_dict["%s.cc" % tablename] = message["Cc"]
                else:
                    item_dict["%s.cc" % tablename] = ""
            if "%s.bcc" % tablename in colnames:
                if "Bcc" in message.keys():
                    item_dict["%s.bcc" % tablename] = message["Bcc"]
                else:
                    item_dict["%s.bcc" % tablename] = ""
            if "%s.deleted" % tablename in colnames:
                item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
            if "%s.draft" % tablename in colnames:
                item_dict["%s.draft" % tablename] = "\\Draft" in flags
            if "%s.flagged" % tablename in colnames:
                item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
            if "%s.recent" % tablename in colnames:
                item_dict["%s.recent" % tablename] = "\\Recent" in flags
            if "%s.seen" % tablename in colnames:
                item_dict["%s.seen" % tablename] = "\\Seen" in flags
            if "%s.subject" % tablename in colnames:
                item_dict["%s.subject" % tablename] = message["Subject"]
            if "%s.answered" % tablename in colnames:
                item_dict["%s.answered" % tablename] = "\\Answered" in flags
            if "%s.mime" % tablename in colnames:
                item_dict["%s.mime" % tablename] = message.get_content_type()
            if "%s.encoding" % tablename in colnames:
                item_dict["%s.encoding" % tablename] = charset

            # Here goes the whole RFC822 body as an email instance
            # for controller side custom processing
            # The message is stored as a raw string
            # >> email.message_from_string(raw string)
            # returns a Message object for enhanced object processing
            if "%s.email" % tablename in colnames:
                # WARNING: no encoding performed (raw message)
                item_dict["%s.email" % tablename] = raw_message

            # Size measure as suggested in a Velocity Reviews post
            # by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports don't match
            # To retrieve the server size for representation would add a
            # new fetch transaction to the process
            for part in message.walk():
                maintype = part.get_content_maintype()
                if ("%s.attachments" % tablename in colnames) or \
                   ("%s.content" % tablename in colnames):
                    if "%s.attachments" % tablename in colnames:
                        # every non-text part is treated as an attachment
                        if not ("text" in maintype):
                            payload = part.get_payload(decode=True)
                            if payload:
                                attachment = {
                                    "payload": payload,
                                    "filename": part.get_filename(),
                                    "encoding": part.get_content_charset(),
                                    "mime": part.get_content_type(),
                                    "disposition": part["Content-Disposition"]}
                                attachments.append(attachment)
                    if "%s.content" % tablename in colnames:
                        payload = part.get_payload(decode=True)
                        part_charset = self.get_charset(part)
                        if "text" in maintype:
                            if payload:
                                content.append(self.encode_text(payload, part_charset))
                if "%s.size" % tablename in colnames:
                    if part is not None:
                        size += len(str(part))
            item_dict["%s.content" % tablename] = content
            item_dict["%s.attachments" % tablename] = attachments
            item_dict["%s.size" % tablename] = size
            imapqry_list.append(item_dict)

        # extra object mapping for the sake of rows object
        # creation (sends an array of lists)
        for item_dict in imapqry_list:
            imapqry_array_item = list()
            for fieldname in colnames:
                imapqry_array_item.append(item_dict[fieldname])
            imapqry_array.append(imapqry_array_item)

        # parse result and return a rows object
        colnames = colnames
        processor = attributes.get('processor',self.parse)
        return processor(imapqry_array, fields, colnames)
6480
6481 - def _update(self, tablename, query, fields, commit=False):
6482 # TODO: the adapter should implement an .expand method 6483 commands = list() 6484 if use_common_filters(query): 6485 query = self.common_filter(query, [tablename,]) 6486 mark = [] 6487 unmark = [] 6488 if query: 6489 for item in fields: 6490 field = item[0] 6491 name = field.name 6492 value = item[1] 6493 if self.is_flag(name): 6494 flag = self.search_fields[name] 6495 if (value is not None) and (flag != "\\Recent"): 6496 if value: 6497 mark.append(flag) 6498 else: 6499 unmark.append(flag) 6500 result, data = self.connection.select( 6501 self.connection.mailbox_names[tablename]) 6502 string_query = "(%s)" % query 6503 result, data = self.connection.search(None, string_query) 6504 store_list = [item.strip() for item in data[0].split() 6505 if item.strip().isdigit()] 6506 # build commands for marked flags 6507 for number in store_list: 6508 result = None 6509 if len(mark) > 0: 6510 commands.append((number, "+FLAGS", "(%s)" % " ".join(mark))) 6511 if len(unmark) > 0: 6512 commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark))) 6513 return commands
6514
6515 - def update(self, tablename, query, fields):
6516 rowcount = 0 6517 commands = self._update(tablename, query, fields) 6518 for command in commands: 6519 result, data = self.connection.store(*command) 6520 if result == "OK": 6521 rowcount += 1 6522 else: 6523 raise Exception("IMAP storing error: %s" % data) 6524 return rowcount
6525
    def _count(self, query, distinct=None):
        # Intentionally unimplemented: the IMAP adapter has no
        # SQL-like textual representation of a count; counting is done
        # directly by count() via an IMAP SEARCH.
        raise NotImplementedError()
6528
6529 - def count(self,query,distinct=None):
6530 counter = 0 6531 tablename = self.get_query_mailbox(query) 6532 if query and tablename is not None: 6533 if use_common_filters(query): 6534 query = self.common_filter(query, [tablename,]) 6535 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6536 string_query = "(%s)" % query 6537 result, data = self.connection.search(None, string_query) 6538 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6539 counter = len(store_list) 6540 return counter
6541
6542 - def delete(self, tablename, query):
6543 counter = 0 6544 if query: 6545 if use_common_filters(query): 6546 query = self.common_filter(query, [tablename,]) 6547 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6548 string_query = "(%s)" % query 6549 result, data = self.connection.search(None, string_query) 6550 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6551 for number in store_list: 6552 result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") 6553 if result == "OK": 6554 counter += 1 6555 else: 6556 raise Exception("IMAP store error: %s" % data) 6557 if counter > 0: 6558 result, data = self.connection.expunge() 6559 return counter
6560
6561 - def BELONGS(self, first, second):
6562 result = None 6563 name = self.search_fields[first.name] 6564 if name == "MESSAGE": 6565 values = [str(val) for val in second if str(val).isdigit()] 6566 result = "%s" % ",".join(values).strip() 6567 6568 elif name == "UID": 6569 values = [str(val) for val in second if str(val).isdigit()] 6570 result = "UID %s" % ",".join(values).strip() 6571 6572 else: 6573 raise Exception("Operation not supported") 6574 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6575 return result
6576
6577 - def CONTAINS(self, first, second, case_sensitive=False):
6578 # silently ignore, only case sensitive 6579 result = None 6580 name = self.search_fields[first.name] 6581 6582 if name in ("FROM", "TO", "SUBJECT", "TEXT"): 6583 result = "%s \"%s\"" % (name, self.expand(second)) 6584 else: 6585 if first.name in ("cc", "bcc"): 6586 result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) 6587 elif first.name == "mime": 6588 result = "HEADER Content-Type \"%s\"" % self.expand(second) 6589 else: 6590 raise Exception("Operation not supported") 6591 return result
6592
6593 - def GT(self, first, second):
6594 result = None 6595 name = self.search_fields[first.name] 6596 if name == "MESSAGE": 6597 last_message = self.get_last_message(first.tablename) 6598 result = "%d:%d" % (int(self.expand(second)) + 1, last_message) 6599 elif name == "UID": 6600 # GT and LT may not return 6601 # expected sets depending on 6602 # the uid format implemented 6603 try: 6604 pedestal, threshold = self.get_uid_bounds(first.tablename) 6605 except TypeError: 6606 e = sys.exc_info()[1] 6607 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6608 return "" 6609 try: 6610 lower_limit = int(self.expand(second)) + 1 6611 except (ValueError, TypeError): 6612 e = sys.exc_info()[1] 6613 raise Exception("Operation not supported (non integer UID)") 6614 result = "UID %s:%s" % (lower_limit, threshold) 6615 elif name == "DATE": 6616 result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6617 elif name == "SIZE": 6618 result = "LARGER %s" % self.expand(second) 6619 else: 6620 raise Exception("Operation not supported") 6621 return result
6622
6623 - def GE(self, first, second):
6624 result = None 6625 name = self.search_fields[first.name] 6626 if name == "MESSAGE": 6627 last_message = self.get_last_message(first.tablename) 6628 result = "%s:%s" % (self.expand(second), last_message) 6629 elif name == "UID": 6630 # GT and LT may not return 6631 # expected sets depending on 6632 # the uid format implemented 6633 try: 6634 pedestal, threshold = self.get_uid_bounds(first.tablename) 6635 except TypeError: 6636 e = sys.exc_info()[1] 6637 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6638 return "" 6639 lower_limit = self.expand(second) 6640 result = "UID %s:%s" % (lower_limit, threshold) 6641 elif name == "DATE": 6642 result = "SINCE %s" % self.convert_date(second) 6643 else: 6644 raise Exception("Operation not supported") 6645 return result
6646
6647 - def LT(self, first, second):
6648 result = None 6649 name = self.search_fields[first.name] 6650 if name == "MESSAGE": 6651 result = "%s:%s" % (1, int(self.expand(second)) - 1) 6652 elif name == "UID": 6653 try: 6654 pedestal, threshold = self.get_uid_bounds(first.tablename) 6655 except TypeError: 6656 e = sys.exc_info()[1] 6657 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6658 return "" 6659 try: 6660 upper_limit = int(self.expand(second)) - 1 6661 except (ValueError, TypeError): 6662 e = sys.exc_info()[1] 6663 raise Exception("Operation not supported (non integer UID)") 6664 result = "UID %s:%s" % (pedestal, upper_limit) 6665 elif name == "DATE": 6666 result = "BEFORE %s" % self.convert_date(second) 6667 elif name == "SIZE": 6668 result = "SMALLER %s" % self.expand(second) 6669 else: 6670 raise Exception("Operation not supported") 6671 return result
6672
6673 - def LE(self, first, second):
6674 result = None 6675 name = self.search_fields[first.name] 6676 if name == "MESSAGE": 6677 result = "%s:%s" % (1, self.expand(second)) 6678 elif name == "UID": 6679 try: 6680 pedestal, threshold = self.get_uid_bounds(first.tablename) 6681 except TypeError: 6682 e = sys.exc_info()[1] 6683 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6684 return "" 6685 upper_limit = int(self.expand(second)) 6686 result = "UID %s:%s" % (pedestal, upper_limit) 6687 elif name == "DATE": 6688 result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6689 else: 6690 raise Exception("Operation not supported") 6691 return result
6692
6693 - def NE(self, first, second=None):
6694 if (second is None) and isinstance(first, Field): 6695 # All records special table query 6696 if first.type == "id": 6697 return self.GE(first, 1) 6698 result = self.NOT(self.EQ(first, second)) 6699 result = result.replace("NOT NOT", "").strip() 6700 return result
6701
6702 - def EQ(self,first,second):
6703 name = self.search_fields[first.name] 6704 result = None 6705 if name is not None: 6706 if name == "MESSAGE": 6707 # query by message sequence number 6708 result = "%s" % self.expand(second) 6709 elif name == "UID": 6710 result = "UID %s" % self.expand(second) 6711 elif name == "DATE": 6712 result = "ON %s" % self.convert_date(second) 6713 6714 elif name in self.flags: 6715 if second: 6716 result = "%s" % (name.upper()[1:]) 6717 else: 6718 result = "NOT %s" % (name.upper()[1:]) 6719 else: 6720 raise Exception("Operation not supported") 6721 else: 6722 raise Exception("Operation not supported") 6723 return result
6724
6725 - def AND(self, first, second):
6726 result = "%s %s" % (self.expand(first), self.expand(second)) 6727 return result
6728
6729 - def OR(self, first, second):
6730 result = "OR %s %s" % (self.expand(first), self.expand(second)) 6731 return "%s" % result.replace("OR OR", "OR")
6732
6733 - def NOT(self, first):
6734 result = "NOT %s" % self.expand(first) 6735 return result
########################################################################
# end of adapters
########################################################################

# Registry mapping the scheme prefix of a DAL connection URI (the part
# before '://') to the adapter class that implements it.  DAL.__init__
# looks the scheme up here to instantiate the right adapter.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'vertica': VerticaAdapter,
    'sybase': SybaseAdapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype
    """
    db = field.db
    # if the validators module could not be imported, validate nothing
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            # custom type supplies its own validator
            return field_type.validator
        else:
            # fall back to validating against the underlying base type
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    # helper used by represent functions below: format a referenced row
    # using the referenced table's _format (string or callable), or just
    # echo the id when the row or format is unavailable
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    # dispatch on the declared field type
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    # 'reference <table>' (no dotted field part) to a defined table:
    # field_type[10:] is the referenced table name
    elif db and field_type.startswith('reference') and \
        field_type.find('.') < 0 and \
        field_type[10:] in db.tables:
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            if field.tablename == field_type[10:]:
                # self-reference: allow empty so the first record can exist
                return validators.IS_EMPTY_OR(requires)
            return requires
    # 'list:reference <table>': field_type[15:] is the table name
    elif db and field_type.startswith('list:reference') and \
        field_type.find('.') < 0 and \
        field_type[15:] in db.tables:
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 values per query, so batch
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # two-letter type prefixes whose empty value is meaningful:
    # in(teger), do(uble), da(te...), ti(me), de(cimal), bo(olean)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape literal '|' characters by doubling them so the value can
    be embedded in a bar-delimited list."""
    text = str(item)
    return text.replace('|', '||')
6879
def bar_encode(items):
    """Serialize items into the bar-delimited format '|a|b|...|',
    escaping embedded bars and skipping blank items."""
    escaped = [bar_escape(item) for item in items if str(item).strip()]
    return '|%s|' % '|'.join(escaped)
6882
def bar_decode_integer(value):
    """Decode a bar-delimited string of integers back into a list.

    Accepts either a string or a file-like blob (anything with .read
    but no .split).  NOTE: uses the ``long`` name, presumably aliased
    elsewhere in this module for Python 3 compatibility.
    """
    if not hasattr(value,'split') and hasattr(value,'read'):
        # blob/file-like input: materialize it as a string first
        value = value.read()
    return [long(token) for token in value.split('|') if token.strip()]
6887
def bar_decode_string(value):
    """Decode a bar-delimited string of strings back into a list,
    undoing the '||' escaping applied by bar_escape."""
    inner = value[1:-1]  # drop the leading and trailing bar
    return [chunk.replace('||', '|')
            for chunk in REGEX_UNPACK.split(inner) if chunk.strip()]
6891
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    # everything is stored directly in __dict__, so attribute access and
    # item access share one namespace
    __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)

    def __getitem__(self, k):
        """Item lookup: check the '_extra' dict (results of expressions
        in the select) first, then resolve dotted 'table.field' keys,
        then fall back to a plain attribute lookup."""
        key=str(k)
        _extra = self.__dict__.get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, DEFAULT)
            if v != DEFAULT:
                return v
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            try:
                # 'table.field' -> row.table[field]
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                # retry below with the bare field name
                key = m.group(2)
        return ogetattr(self, key)

    __setitem__ = lambda self, key, value: setattr(self, str(key), value)

    __delitem__ = object.__delattr__

    __copy__ = lambda self: Row(self)

    # calling a row is the same as indexing it: row('table.field')
    __call__ = __getitem__

    get = lambda self, key, default=None: self.__dict__.get(key,default)

    has_key = __contains__ = lambda self, key: key in self.__dict__

    # Python 2 truth protocol: a row is truthy when it has any values
    __nonzero__ = lambda self: len(self.__dict__)>0

    update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs)

    keys = lambda self: self.__dict__.keys()

    items = lambda self: self.__dict__.items()

    values = lambda self: self.__dict__.values()

    __iter__ = lambda self: self.__dict__.__iter__()

    iteritems = lambda self: self.__dict__.iteritems()

    __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()

    # int()/long() of a row yield its 'id' value
    __int__ = lambda self: object.__getattribute__(self,'id')

    __long__ = lambda self: long(object.__getattribute__(self,'id'))

    def __eq__(self,other):
        # rows compare equal when their dict forms match; anything
        # without an as_dict() is never equal
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """Return a plain dict copy of the row: nested Rows are
        recursively converted, References become integers, Decimals
        become floats, datetimes are optionally stringified, and values
        whose type is not serializable (and not in custom_types) are
        dropped."""
        # NOTE: unicode/long are Python 2 names, presumably aliased
        # elsewhere in this module for Python 3 — verify
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        # iterate over a copy of the keys since entries may be deleted
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    # 'YYYY-MM-DD HH:MM:SS' (seconds precision)
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the row (recursively) as an XML fragment; fields
        whose names are not alphanumeric are emitted as <extra> tags."""
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                # callables (e.g. methods) are not serialized
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the row to a JSON object
        kwargs are passed to .as_dict method
        only "object" mode supported

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order

        mode and colnames are not implemented
        """
        item = self.as_dict(**kwargs)
        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
7033
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    """A list (e.g. db.tables) that can also be called like a function;
    calling it returns a shallow copy of itself."""
    def __call__(self):
        # hand back a copy so callers cannot mutate the original list
        duplicate = copy.copy(self)
        return duplicate
7043
def smart_query(fields,text):
    """
    Parse a natural-language-like query string (e.g.
    "name starts with J and age > 3") against the given fields/tables
    and return the corresponding Query object.

    fields may be a Field, a Table, or a list of either; quoted string
    constants in text are protected, operator synonyms are normalized,
    and the result is folded left-to-right with and/or logic.
    Raises RuntimeError on invalid fields, syntax or operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    # flatten Tables into their fields
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'name' and 'table.name' (lowercased) to the field;
    # first occurrence wins
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # replace quoted constants with '#<i>' placeholders so later
    # normalization cannot touch their contents
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize operator synonyms; identity pairs like ('<','<') just
    # guarantee spacing around the operator
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            # also accept the ' is <synonym>' spelling
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # re-join two-character operators split by the spacing pass
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # tiny state machine: expect field, then op, then value; 'not',
    # 'and', 'or' adjust neg/logic state between triples
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # restore a protected string constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    # bare '=' on text-ish fields means LIKE
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            # reset for the next (field, op, value) triple
            field = op = neg = logic = None
    return query
7161
7162 -class DAL(object):
7163 7164 """ 7165 an instance of this class represents a database connection 7166 7167 Example:: 7168 7169 db = DAL('sqlite://test.db') 7170 7171 or 7172 7173 db = DAL({"uri": ..., "items": ...}) # experimental 7174 7175 db.define_table('tablename', Field('fieldname1'), 7176 Field('fieldname2')) 7177 """ 7178
    def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        """Thread-local instance registry: DAL objects are grouped per
        db_uid (a hash of the uri unless given explicitly).  A
        '<zombie>' uri returns an existing instance for its db_uid, or
        a placeholder that is adopted when the real connection with the
        same db_uid is created later."""
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                # reuse the most recently registered live instance
                db_group = THREAD_LOCAL.db_instances[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                # no instance yet: park a placeholder in the zombie map
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                # promote the zombie placeholder to a live instance
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
    @staticmethod
    def set_folder(folder):
        """
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        # delegates to the adapter base class, which records the folder
        # where .table migration files are created
        BaseAdapter.set_folder(folder)
    @staticmethod
    def get_instances():
        """
        Returns a dictionary with uri as key with timings and defined tables
        {'sqlite://storage.sqlite': {
            'dbstats': [(select auth_user.email from auth_user, 0.02009)],
            'dbtables': {
                'defined': ['auth_cas', 'auth_event', 'auth_group',
                            'auth_membership', 'auth_permission', 'auth_user'],
                'lazy': '[]'
                }
            }
        }
        """
        # db_instances is the per-thread registry populated by __new__
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        infos = {}
        for db_uid, db_group in dbs:
            for db in db_group:
                # skip adapters created without a uri (dummy connections)
                if not db._uri:
                    continue
                # report the uri with its password masked
                k = hide_password(db._uri)
                infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings],
                                dbtables = {'defined':
                                            sorted(list(set(db.tables) -
                                                        set(db._LAZY_TABLES.keys()))),
                                            'lazy': sorted(db._LAZY_TABLES.keys())}
                                )
        return infos
7243 7244 @staticmethod
7245 - def distributed_transaction_begin(*instances):
7246 if not instances: 7247 return 7248 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7249 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7250 instances = enumerate(instances) 7251 for (i, db) in instances: 7252 if not db._adapter.support_distributed_transaction(): 7253 raise SyntaxError( 7254 'distributed transaction not suported by %s' % db._dbname) 7255 for (i, db) in instances: 7256 db._adapter.distributed_transaction_begin(keys[i])
7257 7258 @staticmethod
7259 - def distributed_transaction_commit(*instances):
7260 if not instances: 7261 return 7262 instances = enumerate(instances) 7263 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7264 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7265 for (i, db) in instances: 7266 if not db._adapter.support_distributed_transaction(): 7267 raise SyntaxError( 7268 'distributed transaction not suported by %s' % db._dbanme) 7269 try: 7270 for (i, db) in instances: 7271 db._adapter.prepare(keys[i]) 7272 except: 7273 for (i, db) in instances: 7274 db._adapter.rollback_prepared(keys[i]) 7275 raise RuntimeError('failure to commit distributed transaction') 7276 else: 7277 for (i, db) in instances: 7278 db._adapter.commit_prepared(keys[i]) 7279 return
7280
7281 - def __init__(self, uri=DEFAULT_URI, 7282 pool_size=0, folder=None, 7283 db_codec='UTF-8', check_reserved=None, 7284 migrate=True, fake_migrate=False, 7285 migrate_enabled=True, fake_migrate_all=False, 7286 decode_credentials=False, driver_args=None, 7287 adapter_args=None, attempts=5, auto_import=False, 7288 bigint_id=False,debug=False,lazy_tables=False, 7289 db_uid=None, do_connect=True, after_connection=None):
7290 """ 7291 Creates a new Database Abstraction Layer instance. 7292 7293 Keyword arguments: 7294 7295 :uri: string that contains information for connecting to a database. 7296 (default: 'sqlite://dummy.db') 7297 7298 experimental: you can specify a dictionary as uri 7299 parameter i.e. with 7300 db = DAL({"uri": "sqlite://storage.sqlite", 7301 "items": {...}, ...}) 7302 7303 for an example of dict input you can check the output 7304 of the scaffolding db model with 7305 7306 db.as_dict() 7307 7308 Note that for compatibility with Python older than 7309 version 2.6.5 you should cast your dict input keys 7310 to str due to a syntax limitation on kwarg names. 7311 for proper DAL dictionary input you can use one of: 7312 7313 obj = serializers.cast_keys(dict, [encoding="utf-8"]) 7314 7315 or else (for parsing json input) 7316 7317 obj = serializers.loads_json(data, unicode_keys=False) 7318 7319 :pool_size: How many open connections to make to the database object. 7320 :folder: where .table files will be created. 7321 automatically set within web2py 7322 use an explicit path when using DAL outside web2py 7323 :db_codec: string encoding of the database (default: 'UTF-8') 7324 :check_reserved: list of adapters to check tablenames and column names 7325 against sql/nosql reserved keywords. (Default None) 7326 7327 * 'common' List of sql keywords that are common to all database types 7328 such as "SELECT, INSERT". (recommended) 7329 * 'all' Checks against all known SQL keywords. (not recommended) 7330 <adaptername> Checks against the specific adapters list of keywords 7331 (recommended) 7332 * '<adaptername>_nonreserved' Checks against the specific adapters 7333 list of nonreserved keywords. (if available) 7334 :migrate (defaults to True) sets default migrate behavior for all tables 7335 :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables 7336 :migrate_enabled (defaults to True). 
If set to False disables ALL migrations 7337 :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables 7338 :attempts (defaults to 5). Number of times to attempt connecting 7339 :auto_import (defaults to False). If set, import automatically table definitions from the 7340 databases folder 7341 :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields 7342 :lazy_tables (defaults to False): delay table definition until table access 7343 :after_connection (defaults to None): a callable that will be execute after the connection 7344 """ 7345 7346 items = None 7347 if isinstance(uri, dict): 7348 if "items" in uri: 7349 items = uri.pop("items") 7350 try: 7351 newuri = uri.pop("uri") 7352 except KeyError: 7353 newuri = DEFAULT_URI 7354 locals().update(uri) 7355 uri = newuri 7356 7357 if uri == '<zombie>' and db_uid is not None: return 7358 if not decode_credentials: 7359 credential_decoder = lambda cred: cred 7360 else: 7361 credential_decoder = lambda cred: urllib.unquote(cred) 7362 self._folder = folder 7363 if folder: 7364 self.set_folder(folder) 7365 self._uri = uri 7366 self._pool_size = pool_size 7367 self._db_codec = db_codec 7368 self._lastsql = '' 7369 self._timings = [] 7370 self._pending_references = {} 7371 self._request_tenant = 'request_tenant' 7372 self._common_fields = [] 7373 self._referee_name = '%(table)s' 7374 self._bigint_id = bigint_id 7375 self._debug = debug 7376 self._migrated = [] 7377 self._LAZY_TABLES = {} 7378 self._lazy_tables = lazy_tables 7379 self._tables = SQLCallableList() 7380 self._driver_args = driver_args 7381 self._adapter_args = adapter_args 7382 self._check_reserved = check_reserved 7383 self._decode_credentials = decode_credentials 7384 self._attempts = attempts 7385 self._do_connect = do_connect 7386 7387 if not str(attempts).isdigit() or attempts < 0: 7388 attempts = 5 7389 if uri: 7390 uris = isinstance(uri,(list,tuple)) and uri or [uri] 7391 error = '' 7392 connected = False 
7393 for k in range(attempts): 7394 for uri in uris: 7395 try: 7396 if is_jdbc and not uri.startswith('jdbc:'): 7397 uri = 'jdbc:'+uri 7398 self._dbname = REGEX_DBNAME.match(uri).group() 7399 if not self._dbname in ADAPTERS: 7400 raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname) 7401 # notice that driver args or {} else driver_args 7402 # defaults to {} global, not correct 7403 kwargs = dict(db=self,uri=uri, 7404 pool_size=pool_size, 7405 folder=folder, 7406 db_codec=db_codec, 7407 credential_decoder=credential_decoder, 7408 driver_args=driver_args or {}, 7409 adapter_args=adapter_args or {}, 7410 do_connect=do_connect, 7411 after_connection=after_connection) 7412 self._adapter = ADAPTERS[self._dbname](**kwargs) 7413 types = ADAPTERS[self._dbname].types 7414 # copy so multiple DAL() possible 7415 self._adapter.types = copy.copy(types) 7416 self._adapter.build_parsemap() 7417 if bigint_id: 7418 if 'big-id' in types and 'reference' in types: 7419 self._adapter.types['id'] = types['big-id'] 7420 self._adapter.types['reference'] = types['big-reference'] 7421 connected = True 7422 break 7423 except SyntaxError: 7424 raise 7425 except Exception: 7426 tb = traceback.format_exc() 7427 sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb)) 7428 if connected: 7429 break 7430 else: 7431 time.sleep(1) 7432 if not connected: 7433 raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb)) 7434 else: 7435 self._adapter = BaseAdapter(db=self,pool_size=0, 7436 uri='None',folder=folder, 7437 db_codec=db_codec, after_connection=after_connection) 7438 migrate = fake_migrate = False 7439 adapter = self._adapter 7440 self._uri_hash = hashlib_md5(adapter.uri).hexdigest() 7441 self.check_reserved = check_reserved 7442 if self.check_reserved: 7443 from reserved_sql_keywords import ADAPTERS as RSK 7444 self.RSK = RSK 7445 self._migrate = migrate 7446 self._fake_migrate = fake_migrate 7447 self._migrate_enabled = 
migrate_enabled 7448 self._fake_migrate_all = fake_migrate_all 7449 if auto_import or items: 7450 self.import_table_definitions(adapter.folder, 7451 items=items)
    @property
    def tables(self):
        # Read-only list of the defined table names (a SQLCallableList of
        # strings); Table objects are reached via db[tablename].
        return self._tables
7456
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, items=None):
        """
        Rebuild table definitions either from a dict (``items``, as produced
        by db.as_dict()) or from the pickled ``<uri_hash>_*.table`` migration
        files found under ``path``.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if items:
            # dict input: one entry per table, each with its own "items"
            # sub-dict of fields
            for tablename, table in items.iteritems():
                # TODO: read all field/table options
                fields = []
                # remove unsupported/illegal Table arguments
                [table.pop(name) for name in ("name", "fields") if
                 name in table]
                if "items" in table:
                    for fieldname, field in table.pop("items").iteritems():
                        # remove unsupported/illegal Field arguments
                        [field.pop(key) for key in ("requires", "name",
                                                    "compute", "colname") if key in field]
                        fields.append(Field(str(fieldname), **field))
                self.define_table(str(tablename), *fields, **table)
        else:
            # file input: each .table file holds a pickled dict of
            # {fieldname: field-metadata}
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    # extract the tablename from the filename by stripping
                    # the '<uri_hash>_' prefix and the '.table' suffix
                    name = filename[len(pattern)-7:-6]
                    # pair each Field with its 'sortable' rank so the
                    # original field order can be restored
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                              for key, value in sql_fields.iteritems()]
                    # Python 2 cmp-style sort by the sortable rank
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7493
7494 - def check_reserved_keyword(self, name):
7495 """ 7496 Validates ``name`` against SQL keywords 7497 Uses self.check_reserve which is a list of 7498 operators to use. 7499 self.check_reserved 7500 ['common', 'postgres', 'mysql'] 7501 self.check_reserved 7502 ['all'] 7503 """ 7504 for backend in self.check_reserved: 7505 if name.upper() in self.RSK[backend]: 7506 raise SyntaxError( 7507 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7508
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Map a RESTful URL (args/vars) onto database queries using a list of
        URL patterns, returning a Row with keys status/response/error/pattern.

        EXAMPLE:

            db.define_table('person',Field('name'),Field('info'))
            db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

            @request.restful()
            def index():
                def GET(*args,**vars):
                    patterns = [
                        "/friends[person]",
                        "/{person.name}/:field",
                        "/{person.name}/pets[pet.ownedby]",
                        "/{person.name}/pets[pet.ownedby]/{pet.name}",
                        "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                        ("/dogs[pet]", db.pet.info=='dog'),
                        ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                        ]
                    parser = db.parse_as_rest(patterns,args,vars)
                    if parser.status == 200:
                        return dict(content=parser.response)
                    else:
                        raise HTTP(parser.status,parser.error)

                def POST(table_name,**vars):
                    if table_name == 'person':
                        return db.person.validate_and_insert(**vars)
                    elif table_name == 'pet':
                        return db.pet.validate_and_insert(**vars)
                    else:
                        raise HTTP(400)
                return locals()
        """

        db = self
        # re1 matches '{table.field[.op[.not]]}' tags,
        # re2 matches 'label[table[.field]]' tags
        re1 = REGEX_SEARCH_PATTERN
        re2 = REGEX_SQUARE_BRACKETS

        def auto_table(table,base='',depth=0):
            # Generate URL patterns for every readable field of `table`,
            # recursing into referencing tables up to `depth` levels.
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                # pick the query operators appropriate for the field type
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    # numeric fields expose a [ge, lt) range
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    # progressively narrower year/month/day segments
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    # time-of-day segments extend the date segments above
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if depth>0:
                    # follow back-references into child tables
                    for f in db[table]._referenced_by:
                        tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                        patterns.append(tag)
                        patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # auto-generate patterns for every non-auth table
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any ':auto[table]' terminal tag in user patterns in place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection endpoint: list the active patterns
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            # a pattern may be (pattern, basequery[, exposedfields])
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            # the pattern must have exactly as many segments as the URL
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field.op}' tag: narrow dbset by a field query
                    # print 're1:'+tag
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        # optional 4th token 'not' negates the query
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            # per-table base queries supplied as a dict
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'label[table.field]' tag: hop to a related table
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        # find the field linking the previous table (otable)
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            # use a SQL subselect to restrict the child rows
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            # materialize the parent ids instead
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' tag: return the values of one named field
                    # print 're3:'+tag
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal segment mismatch: try the next pattern
                    break
                otable = table
                i += 1
            if i==len(tags) and table:
                # whole pattern matched: select and return the records
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    # '~field' means descending order
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = long(vars.get('offset',None) or 0)
                    limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
7762
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        # Define (or, with lazy_tables, just register) a table and return
        # the Table object; returns None when the definition is deferred.
        if not fields and 'fields' in args:
            fields = args.get('fields',())
        if not isinstance(tablename,str):
            raise SyntaxError("missing table name")
        elif hasattr(self,tablename) or tablename in self.tables:
            if not args.get('redefine',False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self,tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # NOTE(review): because of the elif chain above, this TABLE_ARGS
            # validation is skipped whenever check_reserved is set; looks
            # unintended but is preserved as-is -- confirm before changing.
            invalid_args = set(args)-TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename,invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # defer the real definition until first access (see __getattr__)
            self._LAZY_TABLES[tablename] = (tablename,fields,args)
            table = None
        else:
            table = self.lazy_define_table(tablename,*fields,**args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
7794
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        # Actually construct the Table object; called by define_table(), or
        # by __getattr__ for tables registered with lazy_tables=True.
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            # append DAL-wide common fields to every table
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        # NOTE(review): `and` binds tighter than `or`, so the datastore
        # branch runs create_table even when migrate is False -- presumably
        # intentional for google:datastore; confirm before restructuring.
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                # serialize DDL generation across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
7835
7836 - def as_dict(self, flat=False, sanitize=True, field_options=True):
7837 dbname = db_uid = uri = None 7838 if not sanitize: 7839 uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid) 7840 db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname, 7841 db_uid=db_uid, 7842 **dict([(k, getattr(self, "_" + k)) for 7843 k in 'pool_size','folder','db_codec', 7844 'check_reserved','migrate','fake_migrate', 7845 'migrate_enabled','fake_migrate_all', 7846 'decode_credentials','driver_args', 7847 'adapter_args', 'attempts', 7848 'bigint_id','debug','lazy_tables', 7849 'do_connect'])) 7850 7851 for table in self: 7852 tablename = str(table) 7853 db_as_dict["tables"].append(tablename) 7854 db_as_dict["items"][tablename] = table.as_dict(flat=flat, 7855 sanitize=sanitize, 7856 field_options=field_options) 7857 return db_as_dict
7858
7859 - def as_xml(self, sanitize=True, field_options=True):
7860 if not have_serializers: 7861 raise ImportError("No xml serializers available") 7862 d = self.as_dict(flat=True, sanitize=sanitize, 7863 field_options=field_options) 7864 return serializers.xml(d)
7865
7866 - def as_json(self, sanitize=True, field_options=True):
7867 if not have_serializers: 7868 raise ImportError("No json serializers available") 7869 d = self.as_dict(flat=True, sanitize=sanitize, 7870 field_options=field_options) 7871 return serializers.json(d)
7872
7873 - def as_yaml(self, sanitize=True, field_options=True):
7874 if not have_serializers: 7875 raise ImportError("No YAML serializers available") 7876 d = self.as_dict(flat=True, sanitize=sanitize, 7877 field_options=field_options) 7878 return serializers.yaml(d)
7879
7880 - def __contains__(self, tablename):
7881 try: 7882 return tablename in self.tables 7883 except AttributeError: 7884 # The instance has no .tables attribute yet 7885 return False
7886 7887 has_key = __contains__ 7888
7889 - def get(self,key,default=None):
7890 return self.__dict__.get(key,default)
7891
7892 - def __iter__(self):
7893 for tablename in self.tables: 7894 yield self[tablename]
7895
    def __getitem__(self, key):
        # db['tablename'] is equivalent to db.tablename; routing through
        # __getattr__ also triggers lazy table definition when needed.
        return self.__getattr__(str(key))
7898
    def __getattr__(self, key):
        # If `key` names a table whose definition was deferred with
        # lazy_tables=True, materialize it now (and remove it from the
        # pending map so it is only built once).
        if ogetattr(self,'_lazy_tables') and \
                key in ogetattr(self,'_LAZY_TABLES'):
            tablename, fields, args = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename,*fields,**args)
        # plain lookup (ogetattr: module-level alias, presumably
        # object.__getattribute__ -- confirm at top of file)
        return ogetattr(self, key)
7905
    def __setitem__(self, key, value):
        # db['name'] = value delegates to attribute assignment (osetattr:
        # module-level alias, presumably object.__setattr__ -- confirm)
        osetattr(self, str(key), value)
7908
7909 - def __setattr__(self, key, value):
7910 if key[:1]!='_' and key in self: 7911 raise SyntaxError( 7912 'Object %s exists and cannot be redefined' % key) 7913 osetattr(self,key,value)
7914 7915 __delitem__ = object.__delattr__ 7916
7917 - def __repr__(self):
7918 if hasattr(self,'_uri'): 7919 return '<DAL uri="%s">' % hide_password(str(self._uri)) 7920 else: 7921 return '<DAL db_uid="%s">' % self._db_uid
7922
    def smart_query(self,fields,text):
        # Parse a free-text search expression with the module-level
        # smart_query() helper and wrap the result in a Set bound to self.
        return Set(self, smart_query(fields,text))
7925
7926 - def __call__(self, query=None, ignore_common_filters=None):
7927 if isinstance(query,Table): 7928 query = self._adapter.id_query(query) 7929 elif isinstance(query,Field): 7930 query = query!=None 7931 elif isinstance(query, dict): 7932 icf = query.get("ignore_common_filters") 7933 if icf: ignore_common_filters = icf 7934 return Set(self, query, ignore_common_filters=ignore_common_filters)
7935
    def commit(self):
        # Commit the current transaction on the underlying adapter.
        self._adapter.commit()
7938
    def rollback(self):
        # Roll back the current transaction on the underlying adapter.
        self._adapter.rollback()
7941
    def close(self):
        # Close the adapter connection, then deregister this instance from
        # the per-thread group keyed by db_uid; drop the group when empty.
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[self._db_uid]
            db_group.remove(self)
            if not db_group:
                del THREAD_LOCAL.db_instances[self._db_uid]
7949
7950 - def executesql(self, query, placeholders=None, as_dict=False, 7951 fields=None, colnames=None):
7952 """ 7953 placeholders is optional and will always be None. 7954 If using raw SQL with placeholders, placeholders may be 7955 a sequence of values to be substituted in 7956 or, (if supported by the DB driver), a dictionary with keys 7957 matching named placeholders in your SQL. 7958 7959 Added 2009-12-05 "as_dict" optional argument. Will always be 7960 None when using DAL. If using raw SQL can be set to True 7961 and the results cursor returned by the DB driver will be 7962 converted to a sequence of dictionaries keyed with the db 7963 field names. Tested with SQLite but should work with any database 7964 since the cursor.description used to get field names is part of the 7965 Python dbi 2.0 specs. Results returned with as_dict=True are 7966 the same as those returned when applying .to_list() to a DAL query. 7967 7968 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 7969 7970 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 7971 is provided, the results cursor returned by the DB driver will be 7972 converted to a DAL Rows object using the db._adapter.parse() method. 7973 7974 The "fields" argument is a list of DAL Field objects that match the 7975 fields returned from the DB. The Field objects should be part of one or 7976 more Table objects defined on the DAL object. The "fields" list can 7977 include one or more DAL Table objects in addition to or instead of 7978 including Field objects, or it can be just a single table (not in a 7979 list). In that case, the Field objects will be extracted from the 7980 table(s). 7981 7982 Instead of specifying the "fields" argument, the "colnames" argument 7983 can be specified as a list of field names in tablename.fieldname format. 7984 Again, these should represent tables and fields defined on the DAL 7985 object. 7986 7987 It is also possible to specify both "fields" and the associated 7988 "colnames". 
In that case, "fields" can also include DAL Expression 7989 objects in addition to Field objects. For Field objects in "fields", 7990 the associated "colnames" must still be in tablename.fieldname format. 7991 For Expression objects in "fields", the associated "colnames" can 7992 be any arbitrary labels. 7993 7994 Note, the DAL Table objects referred to by "fields" or "colnames" can 7995 be dummy tables and do not have to represent any real tables in the 7996 database. Also, note that the "fields" and "colnames" must be in the 7997 same order as the fields in the results cursor returned from the DB. 7998 """ 7999 adapter = self._adapter 8000 if placeholders: 8001 adapter.execute(query, placeholders) 8002 else: 8003 adapter.execute(query) 8004 if as_dict: 8005 if not hasattr(adapter.cursor,'description'): 8006 raise RuntimeError("database does not support executesql(...,as_dict=True)") 8007 # Non-DAL legacy db query, converts cursor results to dict. 8008 # sequence of 7-item sequences. each sequence tells about a column. 8009 # first item is always the field name according to Python Database API specs 8010 columns = adapter.cursor.description 8011 # reduce the column info down to just the field names 8012 fields = [f[0] for f in columns] 8013 # will hold our finished resultset in a list 8014 data = adapter._fetchall() 8015 # convert the list for each row into a dictionary so it's 8016 # easier to work with. 
row['field_name'] rather than row[0] 8017 return [dict(zip(fields,row)) for row in data] 8018 try: 8019 data = adapter._fetchall() 8020 except: 8021 return None 8022 if fields or colnames: 8023 fields = [] if fields is None else fields 8024 if not isinstance(fields, list): 8025 fields = [fields] 8026 extracted_fields = [] 8027 for field in fields: 8028 if isinstance(field, Table): 8029 extracted_fields.extend([f for f in field]) 8030 else: 8031 extracted_fields.append(field) 8032 if not colnames: 8033 colnames = ['%s.%s' % (f.tablename, f.name) 8034 for f in extracted_fields] 8035 data = adapter.parse( 8036 data, fields=extracted_fields, colnames=colnames) 8037 return data
8038
8039 - def _remove_references_to(self, thistable):
8040 for table in self: 8041 table._referenced_by = [field for field in table._referenced_by 8042 if not field.table==thistable]
8043
8044 - def export_to_csv_file(self, ofile, *args, **kwargs):
8045 step = long(kwargs.get('max_fetch_rows,',500)) 8046 write_colnames = kwargs['write_colnames'] = \ 8047 kwargs.get("write_colnames", True) 8048 for table in self.tables: 8049 ofile.write('TABLE %s\r\n' % table) 8050 query = self._adapter.id_query(self[table]) 8051 nrows = self(query).count() 8052 kwargs['write_colnames'] = write_colnames 8053 for k in range(0,nrows,step): 8054 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8055 ofile, *args, **kwargs) 8056 kwargs['write_colnames'] = False 8057 ofile.write('\r\n\r\n') 8058 ofile.write('END')
8059
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', map_tablenames=None,
                             ignore_missing_tables=False,
                             *args, **kwargs):
        # Inverse of export_to_csv_file: read 'TABLE <name>' sections from
        # ifile up to the 'END' marker and delegate each section to
        # Table.import_from_csv_file.
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            elif not line.startswith('TABLE ') or \
                    not line[6:] in self.tables:
                # NOTE(review): this rejects section names BEFORE
                # map_tablenames is consulted, so a mapping from a name not
                # already defined in this db can never apply -- confirm.
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                # optional renaming, e.g. {'old':'new'}; map to None to skip
                tablename = map_tablenames.get(tablename,tablename)
                if tablename is not None and tablename in self.tables:
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset,
                        *args, **kwargs)
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines (this section's rows)
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8090
def DAL_unpickler(db_uid):
    # Recreate a DAL handle at unpickling time: the '<zombie>' uri makes
    # DAL.__init__ return early (presumably the live instance keyed by
    # db_uid is reattached elsewhere -- confirm in DAL.__new__).
    return DAL('<zombie>',db_uid=db_uid)
8094
def DAL_pickler(db):
    # Pickle a DAL as just its db_uid; DAL_unpickler rebuilds the handle.
    return DAL_unpickler, (db._db_uid,)

# register the reduction with the pickle machinery
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper class providing a comma-separated string having all the field names
    (prefixed by table name and '.')

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        names = [str(field) for field in self._table]
        return ', '.join(names)
# class Reference(int):
class Reference(long):
    """
    A lazy reference to a record: behaves as the record id (a long) and
    fetches the referenced row on first attribute/item access.
    (expects _table and _record to be set by the row parser)
    """

    def __allocate(self):
        # fetch and cache the referenced record on first access
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        # BUGFIX: accepts an optional default (backward compatible) so
        # that get() below works; previously get() raised TypeError
        # because __getattr__ took no default argument.
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, default)

    def get(self, key, default=None):
        # dict-style access to the referenced record's fields
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # private attributes go on the Reference itself; everything else
        # is written into the referenced record
        if key.startswith('_'):
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    # Inverse of Reference_pickler: the payload is the marshalled id.
    return marshal.loads(data)
8153
def Reference_pickler(data):
    # Pickle a Reference as its raw id: marshal the long; if marshal
    # refuses (AttributeError), hand-build the same 'i' + 4-byte
    # little-endian payload that marshal.loads expects for an int.
    try:
        marshal_dump = marshal.dumps(long(data))
    except AttributeError:
        marshal_dump = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (marshal_dump,))

# register the reduction with the pickle machinery
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """
    Decorator factory exposed as table.add_method: attaches the decorated
    function to the table instance as a bound method.

    Usage: @table.add_method.my_name (register under 'my_name') or
    @table.add_method() (register under the function's own name).
    """
    def __init__(self,table):
        self.table = table
    def __call__(self):
        # @table.add_method() -> register under the function's own name
        return self.register()
    def __getattr__(self,method_name):
        # @table.add_method.foo -> register under the name 'foo'
        return self.register(method_name)
    def register(self,method_name=None):
        # return the actual decorator
        def _decorated(f):
            instance = self.table
            import types
            # Python 2 bound-method constructor: (func, instance, class)
            method = types.MethodType(f, instance, instance.__class__)
            # f.func_name is the Python 2 spelling of f.__name__
            name = method_name or f.func_name
            setattr(instance, name, method)
            return f
        return _decorated
8179
8180 -class Table(object):
8181 8182 """ 8183 an instance of this class represents a database table 8184 8185 Example:: 8186 8187 db = DAL(...) 8188 db.define_table('users', Field('name')) 8189 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8190 db.users.drop() 8191 """ 8192
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._ot = args.get('actual_name')
        self._sequence_name = args.get('sequence_name') or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name') or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        # human-readable singular/plural labels, derived from the name
        # unless supplied explicitly
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible but for backard compatibility of appamdin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # callback hook lists consumed by insert/update/delete
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames,newfields=set(),[]
        if hasattr(self,'_primarykey'):
            if not isinstance(self._primarykey,list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(self._primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==self._primarykey[0]][0]
        elif not [f for f in fields if isinstance(f,Field) and f.type=='id']:
            # no explicit 'id' field and no primarykey: synthesize one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        def include_new(field):
            # accept a concrete field, tracking names and the id field
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type=='id':
                self._id = field
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                # virtual/computed fields are attached after the real ones
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already bound to another table: work on a copy
                    field = copy.copy(field)
                include_new(field)
            elif isinstance(field, dict) and 'fieldname' and \
                    not field['fieldname'] in fieldnames:
                # NOTE(review): `'fieldname' and` is a truthy string
                # constant -- presumably `'fieldname' in field` was
                # intended; a dict without that key raises KeyError here.
                include_new(Field(**field))
            elif isinstance(field, Table):
                # inherit the fields (except 'id') of another table
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        include_new(field.clone(point_self_references_to=t2))
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # backends storing uploads in-db: add a companion blob field
            # for every upload field with uploadfield=True
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        for field in fields:
            field_name = field.name
            if db and db.check_reserved:
                db.check_reserved_keyword(field_name)
            elif field_name in reserved:
                raise SyntaxError("field name %s not allowed" % field_name)

            # field names must be unique case-insensitively
            if field_name.lower() in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                    % (field_name, tablename))
            else:
                lower_fieldnames.add(field_name.lower())

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
        self.ALL = SQLALL(self)

        if hasattr(self,'_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field
    @property
    def fields(self):
        # Read-only accessor: the ordered SQLCallableList of this table's
        # field names (set up during __init__; also callable as a list).
        return self._fields
8330
    def update(self,*args,**kwargs):
        # Deliberately disabled: updating rows goes through
        # db(query).update(...), not through the Table object itself.
        raise RuntimeError("Syntax Not Supported")
8333
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  current_record = 'current_record',
                                  is_active = 'is_active'):
        """Turn on record versioning for this table.

        Defines a companion archive table (in archive_db, defaulting to
        this table's db) holding a clone of every field plus a
        current_record reference, and registers callbacks so that every
        update archives the previous row and every delete becomes a
        soft-delete (is_active=False). A common filter is installed so
        subsequent queries only see active rows.
        """
        db = self._db
        archive_db = archive_db or db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        same_db = archive_db is db
        # cross-db archives cannot hold real references; fall back to bigint
        field_type = self if same_db else 'bigint'
        clones = []
        for field in self:
            nfk = same_db or not field.type.startswith('reference')
            clones.append(field.clone(
                unique=False, type=field.type if nfk else 'bigint'))
        archive_db.define_table(
            archive_name, Field(current_record,field_type), *clones)
        # defaults on the lambda freeze the current values for the callback
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
        newquery = lambda query, t=self, name=self._tablename: \
            reduce(AND,[db[tn].is_active == True
                        for tn in db._adapter.tables(query)
                        if tn==name or getattr(db[tn],'_ot',None)==name])
        query = self._common_filter
        if query:
            # NOTE(review): this combines a Query with a callable via '&';
            # presumably relies on Query.__and__ accepting a callable —
            # confirm before touching.
            newquery = query & newquery
        self._common_filter = newquery
8368
8369 - def _validate(self,**vars):
8370 errors = Row() 8371 for key,value in vars.iteritems(): 8372 value,error = self[key].validate(value) 8373 if error: 8374 errors[key] = error 8375 return errors
8376
    def _create_references(self):
        """Resolve this table's 'reference ...' fields.

        For every reference field: find the referenced table/field, wire
        up field.referent and the referenced table's _referenced_by list.
        References to tables not yet defined are parked in
        db._pending_references and resolved when that table is defined
        (the final loop picks up fields parked earlier against us).
        """
        db = self._db
        pr = db._pending_references
        self._referenced_by = []
        self._references = []
        for field in self:
            fieldname = field.name
            field_type = field.type
            if isinstance(field_type,str) and field_type[:10] == 'reference ':
                ref = field_type[10:].strip()
                if not ref.strip():
                    raise SyntaxError('Table: reference to nothing: %s' %ref)
                if '.' in ref:
                    # 'table.field' form — used by keyed tables
                    rtablename, rfieldname = ref.split('.',1)
                else:
                    rtablename, rfieldname = ref, None
                if not rtablename in db:
                    # referenced table not defined yet: defer resolution
                    pr[rtablename] = pr.get(rtablename,[]) + [field]
                    continue
                rtable = db[rtablename]
                if rfieldname:
                    if not hasattr(rtable,'_primarykey'):
                        raise SyntaxError(
                            'keyed tables can only reference other keyed tables (for now)')
                    if rfieldname not in rtable.fields:
                        raise SyntaxError(
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename))
                    rfield = rtable[rfieldname]
                else:
                    rfield = rtable._id
                rtable._referenced_by.append(field)
                field.referent = rfield
                self._references.append(field)
            else:
                field.referent = None
        # adopt references that earlier tables parked against this one
        for referee in pr.get(self._tablename,[]):
            self._referenced_by.append(referee)
8415
8416 - def _filter_fields(self, record, id=False):
8417 return dict([(k, v) for (k, v) in record.iteritems() if k 8418 in self.fields and (self[k].type!='id' or id)])
8419
8420 - def _build_query(self,key):
8421 """ for keyed table only """ 8422 query = None 8423 for k,v in key.iteritems(): 8424 if k in self._primarykey: 8425 if query: 8426 query = query & (self[k] == v) 8427 else: 8428 query = (self[k] == v) 8429 else: 8430 raise SyntaxError( 8431 'Field %s is not part of the primary key of %s' % \ 8432 (k,self._tablename)) 8433 return query
8434
    def __getitem__(self, key):
        """table[key]: record lookup by id or primary-key dict, or
        attribute access for any other string key."""
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first()
        # NB precedence: isdigit() or ('google' in DRIVERS and isinstance(...))
        # — the GAE Key branch only applies when the google driver is loaded
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif key:
            # fall through to plain attribute lookup (field access)
            return ogetattr(self, str(key))
8446
    def __call__(self, key=DEFAULT, **kwargs):
        """table(id), table(query) or table(field=value, ...): fetch a
        single matching record (or None).

        Special keyword arguments (consumed, not matched as fields):
        _for_update — select ... FOR UPDATE; _orderby — ordering used
        when picking the single row.
        When *key* is given alongside field kwargs, the kwargs act as a
        post-fetch filter: any mismatch returns None.
        """
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            elif not str(key).isdigit():
                # non-numeric key cannot be an id
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            if record:
                # remaining kwargs must all match the fetched record
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
                return record
        elif kwargs:
            # AND together field==value pairs and fetch the first match
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
        else:
            return None
8472
    def __setitem__(self, key, value):
        """table[key] = value: upsert by primary-key dict (keyed tables),
        insert (key == 0) / update (key == id) by numeric key, or plain
        attribute assignment otherwise."""
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; on failure fall back to update
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                # conventional "new record" key
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)

    # attribute access delegates to item access (table.field == table['field'])
    __getattr__ = __getitem__
8502 - def __setattr__(self, key, value):
8503 if key[:1]!='_' and key in self: 8504 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8505 osetattr(self,key,value)
8506
    def __delitem__(self, key):
        """del table[key]: delete by primary-key dict or numeric id;
        raises SyntaxError when nothing was deleted."""
        if isinstance(key, dict):
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError('No such record: %s' % key)
        # short-circuit: a non-numeric key never issues a DELETE
        elif not str(key).isdigit() or \
                not self._db(self._id == key).delete():
            raise SyntaxError('No such record: %s' % key)
8515
    def __contains__(self,key):
        # membership is attribute existence: fields, methods and
        # metadata attributes all count
        return hasattr(self,key)

    # dict-like alias kept for backward compatibility
    has_key = __contains__
    def items(self):
        # expose the raw attribute dictionary (fields plus metadata)
        return self.__dict__.items()
8523
    def __iter__(self):
        # iterate Field objects in definition order
        for fieldname in self.fields:
            yield self[fieldname]
8527
    def iteritems(self):
        # lazy counterpart of items(): raw attribute dictionary iterator
        return self.__dict__.iteritems()
8530 8531
8532 - def __repr__(self):
8533 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8534
    def __str__(self):
        """SQL name of the table; aliased tables (_ot set) render as
        'original AS alias' (Oracle uses a space instead of AS)."""
        if self._ot is not None:
            ot = self._db._adapter.QUOTE_TEMPLATE % self._ot
            if 'Oracle' in str(type(self._db._adapter)):
                return '%s %s' % (ot, self._tablename)
            return '%s AS %s' % (ot, self._tablename)
        return self._tablename
8542
    def _drop(self, mode = ''):
        # return (do not execute) the SQL needed to drop this table
        return self._db._adapter._drop(self, mode)
8545
    def drop(self, mode = ''):
        # execute the DROP TABLE via the adapter
        return self._db._adapter.drop(self,mode)
8548
    def _listify(self,fields,update=False):
        """Normalize an insert/update value dict into a list of
        (Field, value) pairs, applying filter_in, defaults (insert),
        update values (update) and computed fields; raises on unknown
        or missing-required fields."""
        new_fields = {} # format: new_fields[name] = (field,value)

        # store all fields passed as input in new_fields
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError(
                        'Field %s does not belong to the table' % name)
            else:
                field = self[name]
                value = fields[name]
                if field.filter_in:
                    value = field.filter_in(value)
                new_fields[name] = (field,value)

        # check all fields that should be in the table but are not passed
        to_compute = []
        for ofield in self:
            name = ofield.name
            if not name in new_fields:
                # if field is supposed to be computed, compute it!
                if ofield.compute: # save those to compute for later
                    to_compute.append((name,ofield))
                # if field is required, check its default value
                elif not update and not ofield.default is None:
                    value = ofield.default
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if this is an update, use the update field instead
                elif update and not ofield.update is None:
                    value = ofield.update
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if the field is still not there but it should, error
                elif not update and ofield.required:
                    raise RuntimeError(
                        'Table: missing required field: %s' % name)
        # now deal with fields that are supposed to be computed
        if to_compute:
            row = Row(fields)
            for name,ofield in to_compute:
                # try compute it
                try:
                    row[name] = new_value = ofield.compute(row)
                    new_fields[name] = (ofield, new_value)
                except (KeyError, AttributeError):
                    # error silently unless field is required!
                    if ofield.required:
                        raise SyntaxError('unable to compute field: %s' % name)
        return new_fields.values()
8600
8601 - def _attempt_upload(self, fields):
8602 for field in self: 8603 if field.type=='upload' and field.name in fields: 8604 value = fields[field.name] 8605 if value is not None and not isinstance(value,str): 8606 if hasattr(value,'file') and hasattr(value,'filename'): 8607 new_name = field.store(value.file,filename=value.filename) 8608 elif hasattr(value,'read') and hasattr(value,'name'): 8609 new_name = field.store(value,filename=value.name) 8610 else: 8611 raise RuntimeError("Unable to handle upload") 8612 fields[field.name] = new_name
8613
8614 - def _defaults(self, fields):
8615 "If there are no fields/values specified, return table defaults" 8616 if not fields: 8617 fields = {} 8618 for field in self: 8619 if field.type != "id": 8620 fields[field.name] = field.default 8621 return fields
8622
    def _insert(self, **fields):
        # return (do not execute) the SQL INSERT for the given values
        fields = self._defaults(fields)
        return self._db._adapter._insert(self, self._listify(fields))
8626
    def insert(self, **fields):
        """Insert a record and return its new id (0 when vetoed).

        Applies table defaults, stores pending uploads, then runs the
        _before_insert callbacks (any truthy return vetoes the insert)
        and, on success, the _after_insert callbacks.
        """
        fields = self._defaults(fields)
        self._attempt_upload(fields)
        if any(f(fields) for f in self._before_insert): return 0
        ret = self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            # callbacks receive the inserted values and the new id
            [f(fields,ret) for f in self._after_insert]
        return ret
8636
    def validate_and_insert(self,**fields):
        """Validate every value through its field's validators and insert
        only when all pass. Returns a Row with .id (new id or None) and
        .errors (field name -> error message)."""
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(fields)
        for key,value in fields.iteritems():
            value,error = self[key].validate(value)
            if error:
                response.errors[key] = "%s" % error
            else:
                # keep the validator-transformed value, not the raw input
                new_fields[key] = value
        if not response.errors:
            response.id = self.insert(**new_fields)
        else:
            response.id = None
        return response
8652
8653 - def update_or_insert(self, _key=DEFAULT, **values):
8654 if _key is DEFAULT: 8655 record = self(**values) 8656 elif isinstance(_key,dict): 8657 record = self(**_key) 8658 else: 8659 record = self(_key) 8660 if record: 8661 record.update_record(**values) 8662 newid = None 8663 else: 8664 newid = self.insert(**values) 8665 return newid
8666
    def bulk_insert(self, items):
        """
        here items is a list of dictionaries
        """
        # NOTE(review): unlike insert(), the _before_insert callbacks here
        # receive the listified (Field, value) pairs, not the raw dict —
        # confirm whether callers rely on this asymmetry.
        items = [self._listify(item) for item in items]
        if any(f(item) for item in items for f in self._before_insert):return 0
        ret = self._db._adapter.bulk_insert(self,items)
        # fire _after_insert per inserted item with its returned id
        ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
        return ret
8676
    def _truncate(self, mode = None):
        # return (do not execute) the SQL needed to truncate this table
        return self._db._adapter._truncate(self, mode)
8679
    def truncate(self, mode = None):
        # execute the TRUNCATE via the adapter
        return self._db._adapter.truncate(self, mode)
8682
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one CSV cell to the python value for *field*
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    # remap each referenced id through the id_map
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # True when *colname* is this table's id-type column
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for line in reader:
            if not line:
                break
            if not colnames:
                # first row: header — record column positions
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    else:
                        cols.append(i)
                    if colname == unique:
                        unique_idx = i
            else:
                items = [fix(self[colnames[i]], line[i], id_map, id_offset) \
                         for i in cols if colnames[i] in self.fields]

                if not id_map and cid is not None and id_offset is not None and not unique_idx:
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        if curr_id>csv_id:
                            id_offset[self._tablename] = curr_id-csv_id
                        else:
                            id_offset[self._tablename] = 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    id_map_self[long(line[cid])] = new_id
8819 - def as_dict(self, flat=False, sanitize=True, field_options=True):
8820 tablename = str(self) 8821 table_as_dict = dict(name=tablename, items={}, fields=[], 8822 sequence_name=self._sequence_name, 8823 trigger_name=self._trigger_name, 8824 common_filter=self._common_filter, format=self._format, 8825 singular=self._singular, plural=self._plural) 8826 8827 for field in self: 8828 if (field.readable or field.writable) or (not sanitize): 8829 table_as_dict["fields"].append(field.name) 8830 table_as_dict["items"][field.name] = \ 8831 field.as_dict(flat=flat, sanitize=sanitize, 8832 options=field_options) 8833 return table_as_dict
8834
    def as_xml(self, sanitize=True, field_options=True):
        """Serialize the table definition (via as_dict) to XML;
        raises ImportError when gluon serializers are unavailable."""
        if not have_serializers:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize,
                         field_options=field_options)
        return serializers.xml(d)
8841
    def as_json(self, sanitize=True, field_options=True):
        """Serialize the table definition (via as_dict) to JSON;
        raises ImportError when gluon serializers are unavailable."""
        if not have_serializers:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize,
                         field_options=field_options)
        return serializers.json(d)
8848
    def as_yaml(self, sanitize=True, field_options=True):
        """Serialize the table definition (via as_dict) to YAML;
        raises ImportError when gluon serializers are unavailable."""
        if not have_serializers:
            raise ImportError("No YAML serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize,
                         field_options=field_options)
        return serializers.yaml(d)
8855
    def with_alias(self, alias):
        # aliased copy of this table for self-joins (renders as SQL AS)
        return self._db._adapter.alias(self,alias)
8858
    def on(self, query):
        # build a JOIN ... ON <query> expression for select(join=...)
        return Expression(self._db,self._db._adapter.ON,self,query)
8861
def archive_record(qset,fs,archive_table,current_record):
    """Copy every row matched by *qset* into *archive_table*, storing the
    original row's id in the *current_record* column.

    Installed as a _before_update callback by _enable_record_versioning;
    always returns False so the update itself is never vetoed.

    Change: dropped the unused local `table = qset.db[tablenames[0]]` —
    it was assigned but never referenced.
    """
    tablenames = qset.db._adapter.tables(qset.query)
    # versioning only makes sense for a single-table query
    if len(tablenames) != 1:
        raise RuntimeError("cannot update join")
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
8871
class Expression(object):
    """A lazily-evaluated SQL expression tree node.

    Holds an operator (an adapter method), up to two operands and the
    web2py type of the result; the adapter's expand() renders the tree
    as backend-specific SQL. Fields and Queries build on this class.

    Fix: __invert__ previously tested hasattr(self, '_op'), an attribute
    no Expression ever sets (the attribute is 'op'), so the double-invert
    simplification (~~x -> x) never fired.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        # inherit the result type from the first operand unless given
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregates -----------------------------------------------------

    def sum(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    # --- string helpers -------------------------------------------------

    def lower(self):
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self,a,b):
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)

    # --- date/time extraction -------------------------------------------

    def year(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    # --- slicing (Python 2 __getslice__ protocol) -----------------------

    def __getslice__(self, start, stop):
        db = self.db
        # translate python slice indices into SQL SUBSTRING(pos, length);
        # negative indices are expressed relative to LENGTH()
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        # single character == one-element slice
        # (relies on Python 2 routing expr[i:i+1] to __getslice__)
        return self[i:i + 1]

    def __str__(self):
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        db = self.db
        # collapse a double inversion (~~x) back to x. The previous code
        # guarded this with hasattr(self, '_op'), which was always False,
        # so nested INVERT nodes were built instead.
        if self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    # --- arithmetic -----------------------------------------------------

    def __add__(self, other):
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        db = self.db
        # subtraction result type depends on the operand type
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparisons (build Query objects) ------------------------------

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            # nested select over the query's table id
            value = db(value)._select(value.first._table._id)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameters is only useful for PostgreSQL
        For other RDMBs it is ignored and contains is always case in-sensitive
        For MongoDB and GAE contains is always case sensitive
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            # AND/OR together one contains() per element
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):

        self.type = type
        self.native = native
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        """True when the underlying web2py type name starts with *text*.

        Fix: the original called self.type.startswith(self, text), passing
        self as the prefix argument, so for string types it always raised
        TypeError and returned False.
        """
        try:
            return self.type.startswith(text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        # custom types expose no sub-expression slicing
        return None

    def __getitem__(self, i):
        # custom types expose no item access
        return None

    def __str__(self):
        return self._class
9227
class FieldVirtual(object):
    """A computed, non-stored table field (read-only).

    Legacy single-argument form FieldVirtual(func) yields name 'unknown'.
    """

    def __init__(self, name, f=None, ftype='string', label=None,
                 table_name=None):
        # legacy call style passed only the callable as first argument
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_', ' ')
        self.represent = lambda v, r: v
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def __str__(self):
        return '%s.%s' % (self.tablename, self.name)
9245
class FieldMethod(object):
    """A callable attached to rows of a table (never stored in the db).

    Legacy single-argument form FieldMethod(func) yields name 'unknown'.
    """

    def __init__(self, name, f=None, handler=None):
        # legacy call style passed only the callable as first argument
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.handler = handler
9251
def list_represent(x, r=None):
    """Render a list value as a comma-separated string ('a, b, c');
    None or an empty list renders as ''. *r* (the row) is ignored —
    kept for the standard represent-callback signature."""
    items = x or []
    return ', '.join(str(item) for item in items)
9254
9255 -class Field(Expression):
9256 9257 Virtual = FieldVirtual 9258 Method = FieldMethod 9259 Lazy = FieldMethod # for backward compatibility 9260 9261 """ 9262 an instance of this class represents a database field 9263 9264 example:: 9265 9266 a = Field(name, 'string', length=32, default=None, required=False, 9267 requires=IS_NOT_EMPTY(), ondelete='CASCADE', 9268 notnull=False, unique=False, 9269 uploadfield=True, widget=None, label=None, comment=None, 9270 uploadfield=True, # True means store on disk, 9271 # 'a_field_name' means store in this field in db 9272 # False means file content will be discarded. 9273 writable=True, readable=True, update=None, authorize=None, 9274 autodelete=False, represent=None, uploadfolder=None, 9275 uploadseparate=False # upload to separate directories by uuid_keys 9276 # first 2 character and tablename.fieldname 9277 # False - old behavior 9278 # True - put uploaded file in 9279 # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] 9280 # directory) 9281 uploadfs=None # a pyfilesystem where to store upload 9282 9283 to be used as argument of DAL.define_table 9284 9285 allowed field types: 9286 string, boolean, integer, double, text, blob, 9287 date, time, datetime, upload, password 9288 9289 """ 9290
    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in = None,
        filter_out = None,
        custom_qualifier = None,
        map_none = None,
        ):
        """Define a database field; see the class docstring for the full
        list of options. The field is not bound to a table/db until
        define_table attaches it."""
        self._db = self.db = None # both for backward compatibility
        # Expression attributes: a bare Field acts as a leaf expression
        self.op = None
        self.first = None
        self.second = None
        self.name = fieldname = cleanup(fieldname)
        # reject names that would collide with Table attributes or keywords
        if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # passing a Table/Field as type means 'reference <that table>'
        self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type
        self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
        # with no explicit default, fall back to the update value (or None)
        self.default = default if default!=DEFAULT else (update or None)
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list types get a comma-joined default representation
        self.represent = list_represent if \
            represent==None and type in ('list:integer','list:string') else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label!=None else fieldname.replace('_',' ').title()
        self.requires = requires if requires!=None else []
        self.map_none = map_none
9365
9366 - def set_attributes(self,*args,**attributes):
9367 self.__dict__.update(*args,**attributes)
9368
9369 - def clone(self,point_self_references_to=False,**args):
9370 field = copy.copy(self) 9371 if point_self_references_to and \ 9372 field.type == 'reference %s'+field._tablename: 9373 field.type = 'reference %s' % point_self_references_to 9374 field.__dict__.update(args) 9375 return field
9376
9377 - def store(self, file, filename=None, path=None):
9378 if self.custom_store: 9379 return self.custom_store(file,filename,path) 9380 if isinstance(file, cgi.FieldStorage): 9381 filename = filename or file.filename 9382 file = file.file 9383 elif not filename: 9384 filename = file.name 9385 filename = os.path.basename(filename.replace('/', os.sep)\ 9386 .replace('\\', os.sep)) 9387 m = REGEX_STORE_PATTERN.search(filename) 9388 extension = m and m.group('e') or 'txt' 9389 uuid_key = web2py_uuid().replace('-', '')[-16:] 9390 encoded_filename = base64.b16encode(filename).lower() 9391 newfilename = '%s.%s.%s.%s' % \ 9392 (self._tablename, self.name, uuid_key, encoded_filename) 9393 newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension 9394 self_uploadfield = self.uploadfield 9395 if isinstance(self_uploadfield,Field): 9396 blob_uploadfield_name = self_uploadfield.uploadfield 9397 keys={self_uploadfield.name: newfilename, 9398 blob_uploadfield_name: file.read()} 9399 self_uploadfield.table.insert(**keys) 9400 elif self_uploadfield == True: 9401 if path: 9402 pass 9403 elif self.uploadfolder: 9404 path = self.uploadfolder 9405 elif self.db._adapter.folder: 9406 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9407 else: 9408 raise RuntimeError( 9409 "you must specify a Field(...,uploadfolder=...)") 9410 if self.uploadseparate: 9411 if self.uploadfs: 9412 raise RuntimeError("not supported") 9413 path = pjoin(path,"%s.%s" %(self._tablename, self.name), 9414 uuid_key[:2]) 9415 if not exists(path): 9416 os.makedirs(path) 9417 pathfilename = pjoin(path, newfilename) 9418 if self.uploadfs: 9419 dest_file = self.uploadfs.open(newfilename, 'wb') 9420 else: 9421 dest_file = open(pathfilename, 'wb') 9422 try: 9423 shutil.copyfileobj(file, dest_file) 9424 except IOError: 9425 raise IOError( 9426 'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename) 9427 dest_file.close() 9428 return newfilename
9429
9430 - def retrieve(self, name, path=None, nameonly=False):
9431 """ 9432 if nameonly==True return (filename, fullfilename) instead of 9433 (filename, stream) 9434 """ 9435 self_uploadfield = self.uploadfield 9436 if self.custom_retrieve: 9437 return self.custom_retrieve(name, path) 9438 import http 9439 if self.authorize or isinstance(self_uploadfield, str): 9440 row = self.db(self == name).select().first() 9441 if not row: 9442 raise http.HTTP(404) 9443 if self.authorize and not self.authorize(row): 9444 raise http.HTTP(403) 9445 m = REGEX_UPLOAD_PATTERN.match(name) 9446 if not m or not self.isattachment: 9447 raise TypeError('Can\'t retrieve %s' % name) 9448 file_properties = self.retrieve_file_properties(name,path) 9449 filename = file_properties['filename'] 9450 if isinstance(self_uploadfield, str): # ## if file is in DB 9451 stream = StringIO.StringIO(row[self_uploadfield] or '') 9452 elif isinstance(self_uploadfield,Field): 9453 blob_uploadfield_name = self_uploadfield.uploadfield 9454 query = self_uploadfield == name 9455 data = self_uploadfield.table(query)[blob_uploadfield_name] 9456 stream = StringIO.StringIO(data) 9457 elif self.uploadfs: 9458 # ## if file is on pyfilesystem 9459 stream = self.uploadfs.open(name, 'rb') 9460 else: 9461 # ## if file is on regular filesystem 9462 # this is intentially a sting with filename and not a stream 9463 # this propagates and allows stream_file_or_304_or_206 to be called 9464 fullname = pjoin(file_properties['path'],name) 9465 if nameonly: 9466 return (filename, fullname) 9467 stream = open(fullname,'rb') 9468 return (filename, stream)
9469
9470 - def retrieve_file_properties(self, name, path=None):
9471 self_uploadfield = self.uploadfield 9472 if self.custom_retrieve_file_properties: 9473 return self.custom_retrieve_file_properties(name, path) 9474 try: 9475 m = REGEX_UPLOAD_PATTERN.match(name) 9476 if not m or not self.isattachment: 9477 raise TypeError('Can\'t retrieve %s file properties' % name) 9478 filename = base64.b16decode(m.group('name'), True) 9479 filename = REGEX_CLEANUP_FN.sub('_', filename) 9480 except (TypeError, AttributeError): 9481 filename = name 9482 if isinstance(self_uploadfield, str): # ## if file is in DB 9483 return dict(path=None,filename=filename) 9484 elif isinstance(self_uploadfield,Field): 9485 return dict(path=None,filename=filename) 9486 else: 9487 # ## if file is on filesystem 9488 if path: 9489 pass 9490 elif self.uploadfolder: 9491 path = self.uploadfolder 9492 else: 9493 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9494 if self.uploadseparate: 9495 t = m.group('table') 9496 f = m.group('field') 9497 u = m.group('uuidkey') 9498 path = pjoin(path,"%s.%s" % (t,f),u[:2]) 9499 return dict(path=path,filename=filename)
9500 9501
9502 - def formatter(self, value):
9503 requires = self.requires 9504 if value is None or not requires: 9505 return value or self.map_none 9506 if not isinstance(requires, (list, tuple)): 9507 requires = [requires] 9508 elif isinstance(requires, tuple): 9509 requires = list(requires) 9510 else: 9511 requires = copy.copy(requires) 9512 requires.reverse() 9513 for item in requires: 9514 if hasattr(item, 'formatter'): 9515 value = item.formatter(value) 9516 return value
9517
9518 - def validate(self, value):
9519 if not self.requires or self.requires == DEFAULT: 9520 return ((value if value!=self.map_none else None), None) 9521 requires = self.requires 9522 if not isinstance(requires, (list, tuple)): 9523 requires = [requires] 9524 for validator in requires: 9525 (value, error) = validator(value) 9526 if error: 9527 return (value, error) 9528 return ((value if value!=self.map_none else None), None)
9529
9530 - def count(self, distinct=None):
9531 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9532
9533 - def as_dict(self, flat=False, sanitize=True, options=True):
9534 9535 attrs = ('type', 'length', 'default', 'required', 9536 'ondelete', 'notnull', 'unique', 'uploadfield', 9537 'widget', 'label', 'comment', 'writable', 'readable', 9538 'update', 'authorize', 'autodelete', 'represent', 9539 'uploadfolder', 'uploadseparate', 'uploadfs', 9540 'compute', 'custom_store', 'custom_retrieve', 9541 'custom_retrieve_file_properties', 'custom_delete', 9542 'filter_in', 'filter_out', 'custom_qualifier', 9543 'map_none', 'name') 9544 9545 SERIALIZABLE_TYPES = (int, long, basestring, dict, list, 9546 float, tuple, bool, type(None)) 9547 9548 def flatten(obj): 9549 if flat: 9550 if isinstance(obj, flatten.__class__): 9551 return str(type(obj)) 9552 elif isinstance(obj, type): 9553 try: 9554 return str(obj).split("'")[1] 9555 except IndexError: 9556 return str(obj) 9557 elif not isinstance(obj, SERIALIZABLE_TYPES): 9558 return str(obj) 9559 elif isinstance(obj, dict): 9560 newobj = dict() 9561 for k, v in obj.items(): 9562 newobj[k] = flatten(v) 9563 return newobj 9564 elif isinstance(obj, (list, tuple, set)): 9565 return [flatten(v) for v in obj] 9566 else: 9567 return obj 9568 elif isinstance(obj, (dict, set)): 9569 return obj.copy() 9570 else: return obj
9571 9572 def filter_requires(t, r, options=True): 9573 if sanitize and any([keyword in str(t).upper() for 9574 keyword in ("CRYPT", "IS_STRONG")]): 9575 return None 9576 9577 if not isinstance(r, dict): 9578 if options and hasattr(r, "options"): 9579 if callable(r.options): 9580 r.options() 9581 newr = r.__dict__.copy() 9582 else: 9583 newr = r.copy() 9584 9585 # remove options if not required 9586 if not options and newr.has_key("labels"): 9587 [newr.update({key:None}) for key in 9588 ("labels", "theset") if (key in newr)] 9589 9590 for k, v in newr.items(): 9591 if k == "other": 9592 if isinstance(v, dict): 9593 otype, other = v.popitem() 9594 else: 9595 otype = flatten(type(v)) 9596 other = v 9597 newr[k] = {otype: filter_requires(otype, other, 9598 options=options)} 9599 else: 9600 newr[k] = flatten(v) 9601 return newr
9602 9603 if isinstance(self.requires, (tuple, list, set)): 9604 requires = dict([(flatten(type(r)), 9605 filter_requires(type(r), r, 9606 options=options)) for 9607 r in self.requires]) 9608 else: 9609 requires = {flatten(type(self.requires)): 9610 filter_requires(type(self.requires), 9611 self.requires, options=options)} 9612 9613 d = dict(colname="%s.%s" % (self.tablename, self.name), 9614 requires=requires) 9615 d.update([(attr, flatten(getattr(self, attr))) for attr in attrs]) 9616 return d 9617
9618 - def as_xml(self, sanitize=True, options=True):
9619 if have_serializers: 9620 xml = serializers.xml 9621 else: 9622 raise ImportError("No xml serializers available") 9623 d = self.as_dict(flat=True, sanitize=sanitize, 9624 options=options) 9625 return xml(d)
9626
9627 - def as_json(self, sanitize=True, options=True):
9628 if have_serializers: 9629 json = serializers.json 9630 else: 9631 raise ImportError("No json serializers available") 9632 d = self.as_dict(flat=True, sanitize=sanitize, 9633 options=options) 9634 return json(d)
9635
9636 - def as_yaml(self, sanitize=True, options=True):
9637 if have_serializers: 9638 d = self.as_dict(flat=True, sanitize=sanitize, 9639 options=options) 9640 return serializers.yaml(d) 9641 else: 9642 raise ImportError("No YAML serializers available")
9643
9644 - def __nonzero__(self):
9645 return True
9646
9647 - def __str__(self):
9648 try: 9649 return '%s.%s' % (self.tablename, self.name) 9650 except: 9651 return '<no table>.%s' % self.name
9652
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        """
        Store an operator tree node: `op` is an adapter operator, `first`
        and `second` are its operands (Fields, Expressions, Queries or
        plain values).  No SQL is generated here.
        """
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        """Debug form; expands the query via the adapter."""
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        """SQL fragment for this query, as expanded by the adapter."""
        return self.db._adapter.expand(self)

    def __and__(self, other):
        """Combine two queries with logical AND."""
        return Query(self.db,self.db._adapter.AND,self,other)

    __rand__ = __and__

    def __or__(self, other):
        """Combine two queries with logical OR."""
        return Query(self.db,self.db._adapter.OR,self,other)

    __ror__ = __or__

    def __invert__(self):
        """Negate the query; a double negation unwraps to the inner query."""
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # structural equality via the expanded SQL representation
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        """SQL CASE expression: `t` when the query holds, else `f`."""
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively serialize a Query/Expression __dict__
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        """XML form of as_dict(); raises ImportError without serializers."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """JSON form of as_dict(); raises ImportError without serializers."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9780
def xorify(orderby):
    """
    Fold a sequence of orderby expressions into a single one with the
    | operator; returns None for an empty/falsy input.  (The name is
    historical: the fold uses OR, not XOR.)
    """
    if not orderby:
        return None
    combined = orderby[0]
    for term in orderby[1:]:
        combined = combined | term
    return combined
9788
def use_common_filters(query):
    """
    True when `query` is truthy, exposes ignore_common_filters, and that
    flag is off.  Mirrors the original short-circuit chain exactly:
    a falsy query is returned as-is, a query without the attribute
    yields False.
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
9792
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        """
        Bind this Set to `db` and `query`.  `query` may also be a dict
        (as produced by as_dict), which is rebuilt via self.parse().
        When ignore_common_filters is given and differs from the query's
        current setting, the query is shallow-copied first so the caller's
        object is not mutated.
        """
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        """Debug form showing the adapter-expanded query."""
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        """
        Return a sub-Set: AND the current query with `query`.
        Accepts a Query, a Table (all records with an id), a raw SQL
        string, or a Field (non-null test); None returns self unchanged.
        """
        if query is None:
            return self
        elif isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        """SQL string for counting this set, without executing it."""
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        """SQL string for selecting this set, without executing it."""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        """SQL string for deleting this set, without executing it."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        """SQL string for updating this set, without executing it."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        """
        Plain-dict representation of this Set (experimental; pairs with
        Query.as_dict).  With sanitize=False the db uri/name/uid are
        included as well.
        """
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        """XML form of as_dict(); raises ImportError without serializers."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """JSON form of as_dict(); raises ImportError without serializers."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        # d is {"op": ..., "first": ..., "second": ...}; recurse on operands
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        """True when the set contains no records (cheap limitby-1 probe)."""
        return not self.select(limitby=(0,1), orderby_on_limitby=False)

    def count(self,distinct=None, cache=None):
        """
        Count the records in the set.  With cache=(cache_model, time_expire)
        the result is memoized under a key derived from the generated SQL.
        """
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                 db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """Execute the select and return a Rows object."""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        """Wrap this set's SELECT as an Expression usable as a subquery."""
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        """
        Delete all records in the set, honoring the table's
        _before_delete / _after_delete callbacks; returns rows deleted
        (0 when a before-callback vetoes).
        """
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        """
        Update all records in the set, honoring upload handling and the
        table's _before_update / _after_update callbacks; returns rows
        updated (0 when a before-callback vetoes).
        """
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update("%s" % table,self.query,fields)
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")

        ret = self.db._adapter.update("%s" % table,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        """
        Validate each field value, then update.  Returns a Row with
        .errors (per-field validation errors) and .updated (rows updated,
        None when validation failed, 0 when a before-callback vetoes).
        """
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        """
        Remove from disk the files referenced by autodelete upload fields
        of the records in this set.  Files stored in the database (or whose
        value is unchanged per upload_fields) are skipped.  Always returns
        False.
        """
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10083
class RecordUpdater(object):
    """
    Callable bound to a single record: invoking it with keyword arguments
    updates that record in the database and refreshes the cached column
    set, which is returned.
    """

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        """Update the bound record; with no kwargs, re-save the cached values."""
        db = self.db
        table = db[self.tablename]
        candidates = fields or dict(self.colset)
        # keep only real, non-id columns of the table
        newfields = dict((name, value) for name, value in candidates.items()
                         if name in table.fields and table[name].type != 'id')
        table._db(table._id == self.id,
                  ignore_common_filters=True).update(**newfields)
        self.colset.update(newfields)
        return self.colset
10099
class RecordDeleter(object):
    """Callable bound to a single record; invoking it deletes that record."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        """Delete the bound record; returns the number of rows deleted."""
        db = self.db
        return db(db[self.tablename]._id == self.id).delete()
10105
class LazySet(object):
    """
    Placeholder for a Set over all records whose <fieldname> column equals
    a given id.  The real Set is rebuilt on demand by _getset() and every
    Set operation is delegated to it.
    """

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # materialize the deferred Set: db[table][field] == id
        column = self.db[self.tablename][self.fieldname]
        return Set(self.db, column == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10143
class VirtualCommand(object):
    """
    Callable that binds a lazy virtual-field method to a specific row;
    calling it forwards the row plus any extra arguments to the method.
    """

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        return self.method(self.row, *args, **kwargs)
10150
def lazy_virtualfield(f):
    """
    Decorator: mark a virtual-field method as lazy so that
    Rows.setvirtualfields wraps it in a VirtualCommand instead of
    evaluating it eagerly.  Returns `f` unchanged apart from the flag.
    """
    setattr(f, '__lazy__', True)
    return f
10154
10155 -class Rows(object):
10156 10157 """ 10158 A wrapper for the return value of a select. It basically represents a table. 10159 It has an iterator and each row is represented as a dictionary. 10160 """ 10161 10162 # ## TODO: this class still needs some work to care for ID/OID 10163
10164 - def __init__( 10165 self, 10166 db=None, 10167 records=[], 10168 colnames=[], 10169 compact=True, 10170 rawrows=None 10171 ):
10172 self.db = db 10173 self.records = records 10174 self.colnames = colnames 10175 self.compact = compact 10176 self.response = rawrows
10177
10178 - def __repr__(self):
10179 return '<Rows (%s)>' % len(self.records)
10180
    def setvirtualfields(self,**keyed_virtualfields):
        """
        Attach virtual fields to every row, keyed by table name.
        Methods marked with @lazy_virtualfield are wrapped in a
        VirtualCommand (evaluated on call); plain methods are evaluated
        eagerly after the provider object is primed with the row's data.

        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                # the per-table container the virtual values are stored into
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        if hasattr(method,'__lazy__'):
                            # lazy: defer evaluation until the field is called
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            # eager: prime the provider with row data once,
                            # then evaluate immediately
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self
10220
10221 - def __and__(self,other):
10222 if self.colnames!=other.colnames: 10223 raise Exception('Cannot & incompatible Rows objects') 10224 records = self.records+other.records 10225 return Rows(self.db,records,self.colnames)
10226
10227 - def __or__(self,other):
10228 if self.colnames!=other.colnames: 10229 raise Exception('Cannot | incompatible Rows objects') 10230 records = self.records 10231 records += [record for record in other.records \ 10232 if not record in records] 10233 return Rows(self.db,records,self.colnames)
10234
10235 - def __nonzero__(self):
10236 if len(self.records): 10237 return 1 10238 return 0
10239
10240 - def __len__(self):
10241 return len(self.records)
10242
10243 - def __getslice__(self, a, b):
10244 return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)
10245
10246 - def __getitem__(self, i):
10247 row = self.records[i] 10248 keys = row.keys() 10249 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10250 return row[row.keys()[0]] 10251 return row
10252
10253 - def __iter__(self):
10254 """ 10255 iterator over records 10256 """ 10257 10258 for i in xrange(len(self)): 10259 yield self[i]
10260
10261 - def __str__(self):
10262 """ 10263 serializes the table into a csv file 10264 """ 10265 10266 s = StringIO.StringIO() 10267 self.export_to_csv_file(s) 10268 return s.getvalue()
10269
10270 - def first(self):
10271 if not self.records: 10272 return None 10273 return self[0]
10274
10275 - def last(self):
10276 if not self.records: 10277 return None 10278 return self[-1]
10279
10280 - def find(self,f,limitby=None):
10281 """ 10282 returns a new Rows object, a subset of the original object, 10283 filtered by the function f 10284 """ 10285 if not self: 10286 return Rows(self.db, [], self.colnames) 10287 records = [] 10288 if limitby: 10289 a,b = limitby 10290 else: 10291 a,b = 0,len(self) 10292 k = 0 10293 for row in self: 10294 if f(row): 10295 if a<=k: records.append(row) 10296 k += 1 10297 if k==b: break 10298 return Rows(self.db, records, self.colnames)
10299
10300 - def exclude(self, f):
10301 """ 10302 removes elements from the calling Rows object, filtered by the function f, 10303 and returns a new Rows object containing the removed elements 10304 """ 10305 if not self.records: 10306 return Rows(self.db, [], self.colnames) 10307 removed = [] 10308 i=0 10309 while i<len(self): 10310 row = self[i] 10311 if f(row): 10312 removed.append(self.records[i]) 10313 del self.records[i] 10314 else: 10315 i += 1 10316 return Rows(self.db, removed, self.colnames)
10317
10318 - def sort(self, f, reverse=False):
10319 """ 10320 returns a list of sorted elements (not sorted in place) 10321 """ 10322 rows = Rows(self.db,[],self.colnames,compact=False) 10323 rows.records = sorted(self,key=f,reverse=reverse) 10324 return rows
10325
    def group_by_value(self, *fields, **args):
        """
        regroups the rows, by one of the fields

        :param fields: field names to group by, in nesting order; the
            result is a dict keyed by the first field's values whose
            values are dicts keyed by the next field's values, and so on,
            ending in lists of rows (or a single row when one_result).
        :param one_result: when True keep a single row per innermost
            group instead of a list (default False)
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            ''' helper function:
            inserts row into the nested `groups` dict, recursing one level
            per grouping field; returns the mutated sub-dict, a list of
            rows, or a single row at the innermost level
            '''
            # past the last grouping field: emit the row itself
            if num > len(fields)-1:
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num+1, {})
            else:
                struct = build_fields_struct(row, fields, num+1, groups[ value ])

                # still have more grouping to do
                # (struct is the same dict object as groups[value], already
                # mutated in place by the recursive call, so this no-arg
                # update() is effectively a no-op)
                if type(struct) == type(dict()):
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        # grouping by nothing: return the Rows object unchanged
        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group
10377
10378 - def render(self, i=None, fields=None):
10379 """ 10380 Takes an index and returns a copy of the indexed row with values 10381 transformed via the "represent" attributes of the associated fields. 10382 10383 If no index is specified, a generator is returned for iteration 10384 over all the rows. 10385 10386 fields -- a list of fields to transform (if None, all fields with 10387 "represent" attributes will be transformed). 10388 """ 10389 10390 10391 if i is None: 10392 return (self.repr(i, fields=fields) for i in range(len(self))) 10393 import sqlhtml 10394 row = copy.deepcopy(self.records[i]) 10395 keys = row.keys() 10396 tables = [f.tablename for f in fields] if fields \ 10397 else [k for k in keys if k != '_extra'] 10398 for table in tables: 10399 repr_fields = [f.name for f in fields if f.tablename == table] \ 10400 if fields else [k for k in row[table].keys() 10401 if (hasattr(self.db[table], k) and 10402 isinstance(self.db[table][k], Field) 10403 and self.db[table][k].represent)] 10404 for field in repr_fields: 10405 row[table][field] = sqlhtml.represent( 10406 self.db[table][field], row[table][field], row[table]) 10407 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10408 return row[keys[0]] 10409 return row
10410
10411 - def as_list(self, 10412 compact=True, 10413 storage_to_dict=True, 10414 datetime_to_str=False, 10415 custom_types=None):
10416 """ 10417 returns the data as a list or dictionary. 10418 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10419 :param datetime_to_str: convert datetime fields as strings (default False) 10420 """ 10421 (oc, self.compact) = (self.compact, compact) 10422 if storage_to_dict: 10423 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10424 else: 10425 items = [item for item in self] 10426 self.compact = compact 10427 return items
10428 10429
10430 - def as_dict(self, 10431 key='id', 10432 compact=True, 10433 storage_to_dict=True, 10434 datetime_to_str=False, 10435 custom_types=None):
10436 """ 10437 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 10438 10439 :param key: the name of the field to be used as dict key, normally the id 10440 :param compact: ? (default True) 10441 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10442 :param datetime_to_str: convert datetime fields as strings (default False) 10443 """ 10444 10445 # test for multiple rows 10446 multi = False 10447 f = self.first() 10448 if f and isinstance(key, basestring): 10449 multi = any([isinstance(v, f.__class__) for v in f.values()]) 10450 if (not "." in key) and multi: 10451 # No key provided, default to int indices 10452 def new_key(): 10453 i = 0 10454 while True: 10455 yield i 10456 i += 1
10457 key_generator = new_key() 10458 key = lambda r: key_generator.next() 10459 10460 rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types) 10461 if isinstance(key,str) and key.count('.')==1: 10462 (table, field) = key.split('.') 10463 return dict([(r[table][field],r) for r in rows]) 10464 elif isinstance(key,str): 10465 return dict([(r[key],r) for r in rows]) 10466 else: 10467 return dict([(key(r),r) for r in rows]) 10468
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
            This will only work when exporting rows objects!!!!
            DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames',True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                # a Reference is id-like; export it as its integer value
                return long(value)
            elif hasattr(value, 'isoformat'):
                # date/time/datetime: 'YYYY-MM-DD HH:MM:SS' (isoformat trimmed)
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                # columns not shaped like 'table.field' (e.g. expressions)
                # live in the record's _extra storage
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    # joined rows nest per-table; compact rows are flat
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
10532 - def xml(self,strict=False,row_name='row',rows_name='rows'):
10533 """ 10534 serializes the table using sqlhtml.SQLTABLE (if present) 10535 """ 10536 10537 if strict: 10538 ncols = len(self.colnames) 10539 return '<%s>\n%s\n</%s>' % (rows_name, 10540 '\n'.join(row.as_xml(row_name=row_name, 10541 colnames=self.colnames) for 10542 row in self), rows_name) 10543 10544 import sqlhtml 10545 return sqlhtml.SQLTABLE(self).xml()
10546
10547 - def as_xml(self,row_name='row',rows_name='rows'):
10548 return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
10549
10550 - def as_json(self, mode='object', default=None):
10551 """ 10552 serializes the rows to a JSON list or object with objects 10553 mode='object' is not implemented (should return a nested 10554 object structure) 10555 """ 10556 10557 items = [record.as_json(mode=mode, default=default, 10558 serialize=False, 10559 colnames=self.colnames) for 10560 record in self] 10561 10562 if have_serializers: 10563 return serializers.json(items, 10564 default=default or 10565 serializers.custom_json) 10566 elif simplejson: 10567 return simplejson.dumps(items) 10568 else: 10569 raise RuntimeError("missing simplejson")
10570 10571 # for consistent naming yet backwards compatible 10572 as_csv = __str__ 10573 json = as_json 10574
################################################################################
# dummy function used to define some doctests
################################################################################

# NOTE: the docstring below IS the test suite (run via doctest.testmod in
# the __main__ guard); do not edit it except to change the tests themselves.
def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       jsonf={"j": True},\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
               Field('name'),\
               Field('birth','date'),\
               Field('owner',db.person),\
               migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
                            migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
                            migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
            Field('author_id', db.author),\
            Field('paper_id', db.paper),\
            migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10783 ################################################################################ 10784 # deprecated since the new DAL; here only for backward compatibility 10785 ################################################################################ 10786 10787 SQLField = Field 10788 SQLTable = Table 10789 SQLXorable = Expression 10790 SQLQuery = Query 10791 SQLSet = Set 10792 SQLRows = Rows 10793 SQLStorage = Row 10794 SQLDB = DAL 10795 GQLDB = DAL 10796 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10797 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """Return the WKT representation of a 2D point, e.g. 'POINT (1.000000 2.000000)'."""
    return "POINT ({0:f} {1:f})".format(x, y)
10805
def geoLine(*line):
    """Return the WKT LINESTRING for a sequence of (x, y) coordinate pairs."""
    coords = ["%f %f" % point for point in line]
    return "LINESTRING (%s)" % ",".join(coords)
10808
def geoPolygon(*line):
    """Return the WKT POLYGON for a sequence of (x, y) ring vertices."""
    coords = ["%f %f" % point for point in line]
    return "POLYGON ((%s))" % ",".join(coords)
################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    # execute the doctests embedded in this module (e.g. test_all's docstring)
    import doctest
    doctest.testmod()